Dataset schema. String and list columns report observed min and max lengths, categorical (`stringclasses`) columns report the number of distinct values, and numeric columns report the observed value range:

| Column | Type | Observed range / classes |
| --- | --- | --- |
| repo_name | string | lengths 5–114 |
| repo_url | string | lengths 24–133 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| directory_id | string | length 40 |
| branch_name | string (categorical) | 209 distinct values |
| visit_date | timestamp[ns] | |
| revision_date | timestamp[ns] | |
| committer_date | timestamp[ns] | |
| github_id | int64 | 9.83k–683M |
| star_events_count | int64 | 0–22.6k |
| fork_events_count | int64 | 0–4.15k |
| gha_license_id | string (categorical) | 17 distinct values |
| gha_created_at | timestamp[ns] | |
| gha_updated_at | timestamp[ns] | |
| gha_pushed_at | timestamp[ns] | |
| gha_language | string (categorical) | 115 distinct values |
| files | list | lengths 1–13.2k |
| num_files | int64 | 1–13.2k |
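
For orientation, a row with this schema could be consumed as sketched below with the Hugging Face `datasets` library. This is a hedged example: the dataset id `user/python-repos` is a placeholder rather than the real id behind this dump, and the field names are taken from the schema above.

```python
# Hypothetical loading sketch -- the dataset id is a placeholder.
from datasets import load_dataset

ds = load_dataset("user/python-repos", split="train", streaming=True)
for row in ds.take(1):
    print(row["repo_name"], row["branch_name"], row["num_files"])
    # Each entry of `files` is a dict of per-file metadata and content
    # (path, language, length_bytes, license_type, text, ...).
    for f in row["files"]:
        print(f["path"], f["language"], f["length_bytes"])
```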

repo_name: praveenredy12/chatbot
repo_url: https://github.com/praveenredy12/chatbot
snapshot_id: 67fb3db51a96dee7c2ecf87ca45695c1c7ce05a9
revision_id: 7ed16c7e6c7f0aecf0530dead21cb93302bb59d9
directory_id: e48f44da605a7b863744b46084f12c05ea758bc8
branch_name: refs/heads/master
visit_date: 2023-02-10T05:46:51.951142
revision_date: 2021-01-06T11:56:42
committer_date: 2021-01-06T11:56:52
github_id: 327296332
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.6668867468833923, "alphanum_fraction": 0.680752694606781, "avg_line_length": 32.28571319580078, "blob_id": "b4ebd047a4fe9ff1a46351efb2ee42d9a903926d", "content_id": "5788b5325c848352556fb43379689b912de38a36", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3029, "license_type": "no_license", "max_line_length": 123, "num_lines": 91, "path": "/train_bot.py", "repo_name": "praveenredy12/chatbot", "src_encoding": "UTF-8", "text": "import nltk\nimport json\nimport pickle\nimport random\nimport numpy as np\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout\nfrom keras.optimizers import SGD\nfrom nltk.stem import WordNetLemmatizer\nfrom nltk.corpus import stopwords\n\nnltk.download('punkt')\nnltk.download('wordnet')\nnltk.download('stopwords')\n\ndata_file = open('response_data.json').read()\nintents = json.loads(data_file)\nSTOP_WORDS = set(stopwords.words('english'))\nIGNORE_WORDS = ['?', '!', ',']\nlemmatizer = WordNetLemmatizer()\nwords, classes, documents = [[], [], []]\n\n\nfor intent in intents['intents']:\n for pattern in intent['patterns']:\n # take each word and tokenize it\n w = [s for s in nltk.word_tokenize(pattern) if s not in STOP_WORDS]\n words.extend(w)\n documents.append((w, intent['tag']))\n if intent['tag'] not in classes:\n classes.append(intent['tag'])\n\nwords = [\n lemmatizer.lemmatize(w.lower()) for w in words if w not in IGNORE_WORDS\n]\nwords = sorted(list(set(words)))\nclasses = sorted(list(set(classes)))\n\npickle.dump(words, open('words.pkl', 'wb'))\npickle.dump(classes, open('classes.pkl', 'wb'))\n\ntraining = []\noutput_empty = [0] * len(classes)\nfor doc in documents:\n bag = []\n # list of tokenized words for the pattern\n pattern_words = doc[0]\n # lemmatize each word to create base word\n pattern_words = [\n lemmatizer.lemmatize(word.lower()) for word in pattern_words\n ]\n # create bag of words array with 1, if word match found in current pattern\n for w in words:\n bag.append(1) if w in pattern_words else bag.append(0)\n # output is '0' for each tag and '1' for current tag (for each pattern)\n output_row = list(output_empty)\n output_row[classes.index(doc[1])] = 1\n training.append([bag, output_row])\n\n# shuffle features and turn into np.array\nrandom.shuffle(training)\ntraining = np.array(training)\n# create train and test lists. X - patterns, Y - intents\ntrain_x = list(training[:, 0])\ntrain_y = list(training[:, 1])\nprint(\"Training data created\")\n\n# Create model - 3 layers. First layer 128 neurons, second layer 64 neurons and 3rd output layer contains number of neurons\n# equal to number of intents to predict output intent with softmax\nmodel = Sequential()\nmodel.add(Dense(128, input_shape=(len(train_x[0]), ), activation='relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(64, activation='relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(len(train_y[0]), activation='softmax'))\n\n# Compile model. 
Stochastic gradient descent with Nesterov accelerated gradient gives good results for this model\nsgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)\nmodel.compile(loss='categorical_crossentropy',\n optimizer=sgd,\n metrics=['accuracy'])\n\n# fitting and saving the model\nhist = model.fit(np.array(train_x),\n np.array(train_y),\n epochs=200,\n batch_size=5,\n verbose=1)\nmodel.save('chatbot_model.h5', hist)\n\nprint(\"model created\")\n" }, { "alpha_fraction": 0.5938215255737305, "alphanum_fraction": 0.5991609692573547, "avg_line_length": 32.0379753112793, "blob_id": "094e43a12a246576b7dd100e025ac55cfd04247c", "content_id": "6945ba9318bc2bdc4205dbb36a90555113f1cc66", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2622, "license_type": "no_license", "max_line_length": 96, "num_lines": 79, "path": "/chatbot/chatbot.py", "repo_name": "praveenredy12/chatbot", "src_encoding": "UTF-8", "text": "import nltk\nimport json\nimport random\nimport train_bot\nimport pickle\nimport numpy as np\nfrom nltk.corpus import stopwords\nfrom nltk.stem import WordNetLemmatizer\nfrom keras.models import load_model\n\nlemmatizer = WordNetLemmatizer()\nmodel = load_model('chatbot_model.h5')\n\nintents = json.loads(open('response_data.json').read())\nwords = pickle.load(open('words.pkl', 'rb'))\nclasses = pickle.load(open('classes.pkl', 'rb'))\nstop_words = set(stopwords.words('english'))\n\n\nclass ChatBot:\n\n def __init__(self, text):\n self.user_text = text\n\n def get_response(self):\n ints = self.predict_class(self.user_text)\n res = self.get_processed_output(ints)\n return res\n\n def predict_class(self, sentence):\n # filter out predictions below a threshold\n p = self.bow(sentence)\n res = model.predict(np.array([p]))[0]\n error_threshold = 0.25\n results = [[i, r] for i, r in enumerate(res) if r > error_threshold]\n # sort by strength of probability\n results.sort(key=lambda x: x[1], reverse=True)\n return_list = []\n for r in results:\n return_list.append({\"intent\": classes[r[0]], \"probability\": str(r[1])})\n return return_list\n\n def get_processed_output(self, ints):\n tag = ints[0]['intent']\n list_of_intents = intents['intents']\n for i in list_of_intents:\n if i['tag'] == tag:\n return random.choice(i['responses'])\n\n def bow(self, sentence):\n # return bag of words array: 0 or 1 for each word in the bag that exists in the sentence\n # tokenize the pattern\n sentence_words = self.clean_up_sentence(sentence)\n # bag of words - matrix of N words, vocabulary matrix\n bag = [0] * len(words)\n for s in sentence_words:\n for i, w in enumerate(words):\n if w == s:\n # assign 1 if current word is in the vocabulary position\n bag[i] = 1\n return np.array(bag)\n\n def clean_up_sentence(self,sentence):\n sentence_words = nltk.word_tokenize(sentence)\n sentence_words = [\n lemmatizer.lemmatize(word.lower()) for word in sentence_words\n ]\n return sentence_words\n\n\nif __name__ == '__main__':\n flag = True\n while flag:\n user_text = input('you: ').lower()\n if 'thanks' in user_text or 'thank you' in user_text or 'bye' in user_text:\n print(f'Chatbot: {ChatBot(user_text).get_response()}')\n flag = False\n else:\n print(f'Chatbot: {ChatBot(user_text).get_response()}')\n\n\n\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.6581469774246216, "alphanum_fraction": 0.6581469774246216, "avg_line_length": 16.38888931274414, "blob_id": "a8dc05774c7515c205c74920d11ea84de8fef6e9", "content_id": "48fd100425344055b4776e40ab8448f07d60d419", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 313, "license_type": "no_license", "max_line_length": 43, "num_lines": 18, "path": "/nlp_res.py", "repo_name": "praveenredy12/chatbot", "src_encoding": "UTF-8", "text": "from flask import Flask\nfrom flask_restful import Resource, Api\nfrom chatbot import ChatBot\n\napp = Flask(__name__)\napi = Api(app)\n\n\nclass Response(Resource):\n def get(self,text):\n return ChatBot(text).get_response()\n\n\napi.add_resource(Response, '/res/<text>')\n\n\nif __name__ == '__main__':\n app.run()\n" } ]
num_files: 3
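
The `train_bot.py` blob in the row above ends with two legacy Keras idioms: `SGD(lr=0.01, decay=1e-6, ...)` (the parameter is named `learning_rate` in TF 2.x, and recent releases drop `decay` entirely) and `model.save('chatbot_model.h5', hist)` (the second positional argument of `save()` is `overwrite`, so passing the `fit()` History object there is a bug that happens to be silently truthy). A minimal self-contained sketch of the same training step under TF 2.x, with dummy arrays standing in for the real bag-of-words data:

```python
# Modernized sketch of the row's training step (assumes TensorFlow 2.x;
# the random arrays below are stand-ins for the real bag-of-words data).
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.optimizers import SGD

train_x = np.random.randint(0, 2, size=(20, 50)).astype("float32")  # 20 dummy BoW vectors
train_y = np.eye(3)[np.random.randint(0, 3, size=20)]               # 20 dummy one-hot intents

model = Sequential([
    Dense(128, input_shape=(train_x.shape[1],), activation="relu"),
    Dropout(0.5),
    Dense(64, activation="relu"),
    Dropout(0.5),
    Dense(train_y.shape[1], activation="softmax"),
])
model.compile(
    loss="categorical_crossentropy",
    optimizer=SGD(learning_rate=0.01, momentum=0.9, nesterov=True),  # `lr=` is the legacy name
    metrics=["accuracy"],
)
hist = model.fit(train_x, train_y, epochs=5, batch_size=5, verbose=0)
model.save("chatbot_model.h5")  # path only; the history lives in hist.history
```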

repo_name: novag/pydel
repo_url: https://github.com/novag/pydel
snapshot_id: 76d6befcb717fe297fcc77fdd549d1f9fddbc346
revision_id: 4fa5e225c8d9a38635977ec1b262855a95670ba8
directory_id: aa4bdf7d90e869fbdd30a0c2e974723520fddfd7
branch_name: refs/heads/master
visit_date: 2021-01-15T09:28:28.724550
revision_date: 2017-05-12T18:47:21
committer_date: 2017-05-12T18:47:21
github_id: 59915519
star_events_count: 0
fork_events_count: 1
gha_license_id: null
gha_created_at: 2016-05-28T21:40:27
gha_updated_at: 2016-05-19T13:40:33
gha_pushed_at: 2015-12-23T23:50:22
gha_language: null
files:
[ { "alpha_fraction": 0.6317689418792725, "alphanum_fraction": 0.6425992846488953, "avg_line_length": 22.16666603088379, "blob_id": "6bae29e9a0d2b47c9c652aa2b78916e02554fe95", "content_id": "02f3a86056f7c4e8e3c12eedde3bbaeaa06616e7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 277, "license_type": "permissive", "max_line_length": 62, "num_lines": 12, "path": "/setup.py", "repo_name": "novag/pydel", "src_encoding": "UTF-8", "text": "from setuptools import setup\n\nsetup(\n name='pydel',\n version='1.0.0',\n description='Python implementation of the Jodel protocol',\n url='https://github.com/rolsdorph/pydel',\n author='Mads Rolsdorph',\n author_email='m.rolsdorph@gmail.com',\n license='MIT',\n packages=['pydel']\n)" }, { "alpha_fraction": 0.6626505851745605, "alphanum_fraction": 0.6626505851745605, "avg_line_length": 25.341463088989258, "blob_id": "c76334402111d5447428335b841aa1b5943906e4", "content_id": "b3fff9268ad8a6dfe668be5cfa42a08590edd067", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1079, "license_type": "permissive", "max_line_length": 77, "num_lines": 41, "path": "/pydel/pydel_exceptions.py", "repo_name": "novag/pydel", "src_encoding": "UTF-8", "text": "class Error(Exception):\n pass\n\n\nclass AuthenticationError(Error):\n def __init__(self, message, *args):\n self.message = message\n\n super(AuthenticationError, self).__init__(message, *args)\n\n\nclass UnexpectedResponseCodeException(Error):\n def __init__(self, message, *args):\n self.message = message\n\n super(UnexpectedResponseCodeException, self).__init__(message, *args)\n\n\nclass InvalidPostException(Error):\n def __init__(self, message, json_dict, *args):\n self.message = message\n self.json_dict = json_dict\n\n super(InvalidPostException, self).__init__(message, *args)\n\n\nclass NoPydelInstanceException(Error):\n def __init__(self, *args):\n super(NoPydelInstanceException, self).__init__(*args)\n\n\nclass UnauthorizedDeletionException(Error):\n def __init__(self, post_id, *args):\n self.post_id = post_id\n\n super(UnauthorizedDeletionException, self).__init__(*args)\n\n\nclass UnauthenticatedException(Error):\n def __init__(self, *args):\n super(UnauthenticatedException, self).__init__(*args)" }, { "alpha_fraction": 0.7225568294525146, "alphanum_fraction": 0.7427628040313721, "avg_line_length": 47.556602478027344, "blob_id": "80117544920f5ba9893a12adb58cb8b40ec6ff9a", "content_id": "ed4c1eaa831bf681ae92ba1655d84ef4241d7a4e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5147, "license_type": "permissive", "max_line_length": 182, "num_lines": 106, "path": "/README.md", "repo_name": "novag/pydel", "src_encoding": "UTF-8", "text": "# Pydel\n\nA Python implementation of the Jodel protocol.\n\n## Installation\n\nAfter obtaining a copy of this repository, navigate to the directory and install it the same way you would install any\nother Python package:\n\n```\npip install -r requirements.txt\npython setup.py install\n```\n\nI highly recommend installing to a [virtual environment](https://virtualenv.readthedocs.org/).\n\n## Usage\n\nBelow is a brief overview of the methods available. Please refer to the documentation for more details, including which\nexceptions might be raised.\n\n### Authenticating\n\nThe Pydel class is used to communicate with the Jodel API. 
Its constructor takes 7 arguments:\n - city: The name of the city you are Jodling from\n - country code: The [country code](https://en.wikipedia.org/wiki/Country_code) for the country you are Jodling from\n - lat: Latitude of the location you're Jodling from\n - lng: Longitude of the location you're Jodling from\n - device_uid: A 64 character string consisting of numbers and lowercase numbers. The device UID is your \"username\", used to identify you.\n - (optional) user_agent_string: The user agent string you want to use. Default is \"Jodel/65000 Dalvik/2.1.0 (Linux; U; Android 5.0; SM-G900F Build/LRX21T)\"\n\n```\nfrom pydel import Pydel\nimport pydel.colors\nfrom pydel.utils import random_device_uid\n\nuid = random_device_uid()\n\np = Pydel(city='Trondheim', country_code='NO', lat=60.0, lng=10.0, device_uid=device_uid, user_agent_string='Jodel/65000 Dalvik/2.1.0 (Linux; U; Android 5.0; SM-G900F Build/LRX21T)')\np.authenticate() # Authenticate with the server\n```\n\nNote that authenticate() must be called before doing anything else.\n\n### Fetching data\n\nget_karma() will return your karma as an integer value. Pydel also implements several public methods that you can use to\nfetch posts: get_my_recent_posts(), get_my_popular_posts(), get_my_discussed_posts(), get_my_replies(), get_my_votes(),\nget_recent_posts(), get_popular_posts() and get_discussed_posts(). These will all return a list of Post instances.\n\n```\nkarma = p.get_karma() # 42\ntop_jodels = p.get_popular_posts() # [<pydel.Post instance at 0x7f798e7e9c20>, <pydel.Post instance at 0x7f798e7e9b00>, ...]\n```\n\n### Sending data\nPydel supports voting, replying and posting new jodels:\n\n - upvote_post/downvote_post(post) takes a Post instance and upvotes/downvotes it. Returns False if the user currently\n logged in has already voted on this post, True if the vote request was sent.\n - delete_post(post) attempts to delete the post associated with the given Post instance. Returns True if the request is\n sent without encountering any exceptions.\n - new_post(color, message) posts a new Jodel with the given color and message. Please note that the server will\n discard any non-Jodel colors.\n - new_reply(message, post) posts a reply containing message as a response to the given post.\n\n```\np.new_post(color=pydel.colors.RED, message=\"I just love this app!\") # [<pydel.Post instance at 0x7f798e7e9c20>, <pydel.Post instance at 0x7f798e7e9b00>, ...]\n```\n\n### Colors\nAll colors are specified as a six character hexadecimal string. The options accepted by the server are\n - FF9908 (orange)\n - FFBA00 (yellow)\n - DD5F5F (red)\n - 06A3CB (blue)\n - 8ABDB0 (bluegreyish)\n - 9EC41C (green)\n\nThese can also be found as ORANGE, YELLOW, RED, BLUE, BLUEGREY and GREEN in pydel.colors:\n\n```\nprint(pydel.colors.ORANGE) # FF9908\n```\n\n### Post properties\nA Post instance has methods upvote(), downvote() and reply(message). These behave just like the Pydel methods\ndescribed above. In addition, it has the following properties:\n - voted (str): \"up\"/\"down\" if the user fetching the post has voted on the post. None if the user has not voted.\n - vote_count (int): Signed integer indicating how many votes the post has.\n - has_replies (bool): True if the post has replies, False if it does not.\n - reply_from_op (bool): True if the post was made by someone replying to their own thread.\n - replies (list): List of Post objects representing the replies to this post. 
Empty list if there are no replies.\n - reply_count (int): The number of replies to this post.\n - is_image (bool): True if the post contains an image, False if it does not.\n - image_url (str): None if the post doesn't contain an image, AWS url if it does.\n - thumbnail_url (str): None if the post doesn't contain an image, AWS url if it does.\n - created_at (datetime): Time the post was created.\n - updated_at (datetime): Time the post was last updated (seems to always be the same as created_at).\n - own_post (boolean): True if the post was written by the user who fetched it, False if it was not.\n - distance (int): Euclidean post distance in kilometers (very_close: 0..1, close: 2..10, city_name: 11+).\n - location (dict): Dictionary mapping 'lat', 'lng' and 'name' to latitude, longitude and name.\n - message (str): The contents of the post. Empty string it no message is found.\n - color (str): Six character string describing the color of the post. FFFFFF if no color is found.\n - post_id (str): Alphanumeric string identifying the post.\n - user_handle (str): Alphanumeric string identifying a user in the current thread.\n" }, { "alpha_fraction": 0.5929043292999268, "alphanum_fraction": 0.602491021156311, "avg_line_length": 36.742164611816406, "blob_id": "f1f66f68f4e6c83784a18158a9e19ccadf30e8be", "content_id": "3045f669773513f3a9487469a38d97757e35d58d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 26495, "license_type": "permissive", "max_line_length": 168, "num_lines": 702, "path": "/pydel/__init__.py", "repo_name": "novag/pydel", "src_encoding": "UTF-8", "text": "from pydel.pydel_exceptions import (AuthenticationError, UnexpectedResponseCodeException, InvalidPostException,\n NoPydelInstanceException, UnauthorizedDeletionException, UnauthenticatedException)\nfrom pydel import colors, utils\n\nimport requests\nimport time\n\nDEFAULT_USER_AGENT_STRING = 'Jodel/65000 Dalvik/2.1.0 (Linux; U; Android 5.0; SM-G900F Build/LRX21T)'\nBASE_API_URL = 'https://api.go-tellm.com/api/v2/'\n\n\nclass Pydel:\n def __init__(self, city, country_code, lat, lng, device_uid=None, user_agent_string=DEFAULT_USER_AGENT_STRING, debug=False):\n self._location = {\n 'city': city,\n 'country': country_code,\n 'loc_accuracy': utils.random_loc_accuracy(),\n 'loc_coordinates': {\n 'lat': lat,\n 'lng': lng\n },\n 'name': city\n }\n self._device_uid = device_uid\n self._user_agent_string = user_agent_string\n self._debug = debug\n\n self._access_token = None\n self._distinct_id = None\n self._expiration_date = None\n self._refresh_token = None\n\n def _generate_headers(self):\n return {'User-Agent': self._user_agent_string,\n 'Authorization': \"Bearer {}\".format(self._access_token),\n 'Accept-Encoding': 'gzip'\n }\n\n def _authenticated_request(self, method, url, json_data=None, data=None):\n if self._access_token is None:\n raise UnauthenticatedException()\n\n if self._expiration_date is not None and self._expiration_date < time.time(): # Our access token has expired\n self.authenticate()\n\n req = requests.request(method=method, url=BASE_API_URL + url, headers=self._generate_headers(), json=json_data,\n data=data)\n\n if self._debug:\n print(\"_authenticated_request: \" + req.text)\n\n if req.status_code == requests.codes.ok or req.status_code == requests.codes.no_content:\n return req\n else:\n raise UnexpectedResponseCodeException(\"Server responded with {}\".format(req.status_code))\n\n def _new_post(self, color, message):\n \"\"\"\n Posts a new 
Jodel.\n\n Args:\n color: Post color, hexadecimal without leading #. Can be FF9908 (orange), FFBA00 (yellow), DD5F5F (red), 06A3CB (blue), 8ABDB0 (bluegreyish), 9EC41C (green)\n message: Content of the post\n\n Returns:\n Request object\n\n Raises:\n AuthenticationError: An attempt to replace an outdated auth token failed.\n UnexpectedResponseCodeException: The server responded with an unexpected HTTP status code (that is, not 200 or 204)\n \"\"\"\n return self._authenticated_request(method='POST', url='posts',\n json_data={\n 'color': color,\n 'location': self._location,\n 'message': message})\n\n def _reply_to_post_id(self, color, message, post_id):\n \"\"\"\n Posts a reply to a Jodel.\n\n Args:\n color: Post color, hexadecimal without leading #. Can be FF9908 (orange), FFBA00 (yellow), DD5F5F (red), 06A3CB (blue), 8ABDB0 (bluegreyish), 9EC41C (green)\n message: Content of the post\n post_id: Id of the post to reply to\n\n Returns:\n Request object\n\n Raises:\n AuthenticationError: An attempt to replace an outdated auth token failed.\n UnexpectedResponseCodeException: The server responded with an unexpected HTTP status code (that is, not 200 or 204)\n \"\"\"\n return self._authenticated_request(method='POST', url='posts',\n json_data={\n 'ancestor': post_id,\n 'color': color,\n 'location': self._location,\n 'message': message})\n\n def _delete_post_id(self, post_id):\n return self._authenticated_request(method='DELETE', url=\"posts/{}\".format(post_id))\n\n def _vote_post_id(self, post_id, direction):\n \"\"\"\n Upvotes or downvotes a jodel.\n\n Args:\n post_id: id of the post to vote.\n direction: \"up\" for upvote, \"down\" for downvote.\n\n Returns:\n Request object.\n\n Raises:\n AuthenticationError: An attempt to replace an outdated auth token failed.\n UnexpectedResponseCodeException: The server responded with an unexpected HTTP status code (that is, not 200 or 204)\n \"\"\"\n return self._authenticated_request(method='PUT', url=\"posts/{}/{}vote\".format(post_id, direction))\n\n def get_device_uid(self):\n return self._device_uid\n\n def set_token(self, access_token):\n self._access_token = access_token\n return True\n\n def authenticate(self):\n \"\"\"\n Authenticates with the Jodel server, then sleeps for 5 seconds.\n\n Returns:\n True on success.\n\n Raises:\n AuthenticationError on failure to authenticate (typically, the server not returning HTTP 200 or 204).\n \"\"\"\n req = requests.post(BASE_API_URL + 'users',\n headers={'User-Agent': self._user_agent_string,\n 'Accept-Encoding': 'gzip',\n 'Content-Type': 'application/json; charset=UTF-8'},\n json={'client_id': '81e8a76e-1e02-4d17-9ba0-8a7020261b26',\n 'device_uid': self._device_uid,\n 'location': self._location}\n )\n\n if self._debug:\n print(\"authenticate: \" + req.text)\n\n if req.status_code == requests.codes.ok:\n self._access_token = req.json()['access_token']\n self._distinct_id = req.json()['distinct_id']\n self._expiration_date = req.json()['expiration_date']\n self._refresh_token = req.json()['refresh_token']\n\n time.sleep(5) # Workaround for certain actions being disabled for x seconds after authentication\n\n return True\n\n else:\n raise AuthenticationError(\"Server returned {}\".format(req.status_code))\n\n def set_location(self, city=None, lat=None, lng=None, country_code=None, loc_name=None, loc_accuracy=None, force=False):\n \"\"\"\n Sets the current location.\n\n Args:\n city: City name\n lat: Latitude of position to post from\n lng: Longitude of position to post from\n country_code: 2 or 3 capital 
letter country code\n loc_name: Human-friendly name of position to post from\n loc_accuracy: Location accuracy\n\n Returns:\n True if location modified, False if not\n \"\"\"\n modified = False\n\n if city and city != self._location['city']:\n self._location['city'] = city\n modified = True\n\n if lat and lat != self._location['loc_coordinates']['lat']:\n self._location['loc_coordinates']['lat'] = lat\n modified = True\n\n if lng and lng != self._location['loc_coordinates']['lng']:\n self._location['loc_coordinates']['lng'] = lng\n modified = True\n\n if country_code and country_code != self._location['country']:\n self._location['country'] = country_code\n modified = True\n\n if loc_name and loc_name != self._location['name']:\n self._location['name'] = loc_name\n modified = True\n\n if loc_accuracy and loc_accuracy != self._location['loc_accuracy']:\n self._location['loc_accuracy'] = loc_accuracy\n modified = True\n\n if modified or force:\n self._authenticated_request(method='PUT', url='users/location', json_data={'location': self._location}).text\n modified = True\n\n return modified\n\n def get_karma(self):\n \"\"\"\n Returns karma for the currently logged in user.\n\n Returns:\n Karma as an integer.\n\n Raises:\n AuthenticationError: An attempt to replace an outdated auth token failed.\n UnexpectedResponseCodeException: The server responded with an unexpected HTTP status code (that is, not 200 or 204)\n \"\"\"\n return int(self._authenticated_request(method='GET', url='/users/karma').json()['karma'])\n\n def get_my_recent_posts(self):\n \"\"\"\n Returns the posts of the currently logged in user.\n\n Returns:\n list of Post objects.\n\n Raises:\n AuthenticationError: An attempt to replace an outdated auth token failed.\n UnexpectedResponseCodeException: The server responded with an unexpected HTTP status code (that is, not 200 or 204)\n \"\"\"\n return generate_post_list(self._authenticated_request(method='GET', url='posts/mine/').json()['posts'], self)\n\n def get_my_popular_posts(self):\n \"\"\"\n Returns the highest voted posts of the currently logged in user.\n\n Returns:\n list of Post objects.\n\n Raises:\n AuthenticationError: An attempt to replace an outdated auth token failed.\n UnexpectedResponseCodeException: The server responded with an unexpected HTTP status code (that is, not 200 or 204)\n \"\"\"\n return generate_post_list(\n self._authenticated_request(method='GET', url='posts/mine/popular').json()['posts'], self)\n\n def get_my_discussed_posts(self):\n \"\"\"\n Returns the most commented posts of the currently logged in user.\n\n Returns:\n list of Post objects.\n\n Raises:\n AuthenticationError: An attempt to replace an outdated auth token failed.\n UnexpectedResponseCodeException: The server responded with an unexpected HTTP status code (that is, not 200 or 204)\n \"\"\"\n return generate_post_list(\n self._authenticated_request(method='GET', url='posts/mine/discussed').json()['posts'], self)\n\n def get_my_replies(self):\n \"\"\"\n Returns the replies of the currently logged in user.\n\n Returns:\n list of Post objects.\n\n Raises:\n AuthenticationError: An attempt to replace an outdated auth token failed.\n UnexpectedResponseCodeException: The server responded with an unexpected HTTP status code (that is, not 200 or 204)\n \"\"\"\n return generate_post_list(\n self._authenticated_request(method='GET', url='posts/mine/replies').json()['posts'], self)\n\n def get_my_votes(self):\n \"\"\"\n Returns posts the currently logged in user has voted on.\n\n Returns:\n list 
of Post objects.\n\n Raises:\n AuthenticationError: An attempt to replace an outdated auth token failed.\n UnexpectedResponseCodeException: The server responded with an unexpected HTTP status code (that is, not 200 or 204)\n \"\"\"\n return generate_post_list(\n self._authenticated_request(method='GET', url='posts/mine/votes').json()['posts'], self)\n\n def get_recent_posts(self, lat=None, lng=None, limit=30):\n \"\"\"\n Returns most recent posts near the current position.\n\n Args:\n lat: Latitude of position to get posts from\n lng: Longitude of position to get post from\n limit: Number of posts to get\n\n Returns:\n list of Post objects.\n\n Raises:\n AuthenticationError: An attempt to replace an outdated auth token failed.\n UnexpectedResponseCodeException: The server responded with an unexpected HTTP status code (that is, not 200 or 204)\n \"\"\"\n self.set_location(lat=lat, lng=lng)\n params = '?limit=' + str(limit)\n if (lat and lng):\n params += '&lat=' + str(lat) + '&lng=' + str(lng)\n\n return generate_post_list(self._authenticated_request(method='GET', url='posts/location' + params).json()['posts'], self)\n\n def get_popular_posts(self, lat=None, lng=None, limit=30):\n \"\"\"\n Returns highest voted posts near the current position.\n\n Args:\n lat: Latitude of position to get posts from\n lng: Longitude of position to get post from\n limit: Number of posts to get\n\n Returns:\n list of Post objects.\n\n Raises:\n AuthenticationError: An attempt to replace an outdated auth token failed.\n UnexpectedResponseCodeException: The server responded with an unexpected HTTP status code (that is, not 200 or 204)\n \"\"\"\n self.set_location(lat=lat, lng=lng)\n params = '?limit=' + str(limit)\n if (lat and lng):\n params += '&lat=' + str(lat) + '&lng=' + str(lng)\n\n return generate_post_list(\n self._authenticated_request(method='GET', url='posts/location/popular' + params).json()['posts'], self)\n\n def get_discussed_posts(self, lat=None, lng=None, limit=30):\n \"\"\"\n Returns most commented posts near the current position.\n\n Args:\n lat: Latitude of position to get posts from\n lng: Longitude of position to get post from\n limit: Number of posts to get\n\n Returns:\n list of Post objects.\n\n Raises:\n AuthenticationError: An attempt to replace an outdated auth token failed.\n UnexpectedResponseCodeException: The server responded with an unexpected HTTP status code (that is, not 200 or 204)\n \"\"\"\n self.set_location(lat=lat, lng=lng)\n params = '?limit=' + str(limit)\n if (lat and lng):\n params += '&lat=' + str(lat) + '&lng=' + str(lng)\n\n return generate_post_list(\n self._authenticated_request(method='GET', url='posts/location/discussed' + params).json()['posts'], self)\n\n def get_post(self, post_id):\n \"\"\"\n Returns a specific Jodel post.\n\n Args:\n post_id: Alphanumeric string identifying the post\n\n Returns:\n Post object.\n\n Raises:\n AuthenticationError: An attempt to replace an outdated auth token failed.\n UnexpectedResponseCodeException: The server responded with an unexpected HTTP status code (that is, not 200 or 204)\n \"\"\"\n return Post(self._authenticated_request(method='GET', url='posts/{}'.format(post_id)).json(), self)\n\n def new_post(self, color, message):\n \"\"\"\n Posts a new Jodel, using current position and a randomized location accuracy.\n\n Args:\n color: Post color, hexadecimal without leading #. 
Can be FF9908 (orange), FFBA00 (yellow), DD5F5F (red), 06A3CB (blue), 8ABDB0 (bluegreyish), 9EC41C (green)\n message: Content of the post.\n\n Returns:\n List of Post objects containing the newest posts near the current position.\n\n Raises:\n AuthenticationError: An attempt to replace an outdated auth token failed.\n UnexpectedResponseCodeException: The server responded with an unexpected HTTP status code (that is, not 200 or 204)\n \"\"\"\n return generate_post_list(self._new_post(color=color, message=message).json()['posts'], self)\n\n def new_reply(self, message, post):\n \"\"\"\n Posts a reply, using current position and a randomized location accuracy.\n\n Args:\n message: Content of the reply.\n post: Post object to reply to.\n\n Returns:\n List of Post objects containing the newest posts near the current position.\n\n Raises:\n AuthenticationError: An attempt to replace an outdated auth token failed.\n UnexpectedResponseCodeException: The server responded with an unexpected HTTP status code (that is, not 200 or 204)\n \"\"\"\n return generate_post_list(self._reply_to_post_id(color=post.color, message=message, post_id=post.post_id).json(), self)\n\n def delete_post(self, post):\n \"\"\"\n Deletes a post.\n\n Args:\n post: Post object to delete.\n\n Returns:\n True if the deletion request was successfully sent.\n\n Raises:\n AuthenticationError: An attempt to replace an outdated auth token failed.\n UnexpectedResponseCodeException: The server responded with an unexpected HTTP status code (that is, not 200 or 204)\n \"\"\"\n self._delete_post_id(post.post_id)\n return True\n\n def upvote_post(self, post):\n \"\"\"\n Upvotes a post.\n\n Args:\n post: Post object to upvote.\n\n Returns:\n False if the currently logged in user has already voted on this post, True if the vote was successful.\n\n Raises:\n AuthenticationError: An attempt to replace an outdated auth token failed.\n UnexpectedResponseCodeException: The server responded with an unexpected HTTP status code (that is, not 200 or 204).\n \"\"\"\n if post.voted is not None:\n return False\n\n else:\n self._vote_post_id(post.post_id, 'up')\n return True\n\n def downvote_post(self, post):\n \"\"\"\n Downvotes a post.\n\n Args:\n post: Post object to downvote.\n\n Returns:\n False if the currently logged in user has already voted on this post, True if the vote was successful.\n\n Raises:\n AuthenticationError: An attempt to replace an outdated auth token failed.\n UnexpectedResponseCodeException: The server responded with an unexpected HTTP status code (that is, not 200 or 204).\n \"\"\"\n if post.voted is not None:\n return False\n\n else:\n self._vote_post_id(post.post_id, 'down')\n return True\n\n\nclass Post:\n \"\"\"\n A Jodel post.\n\n In addition to the explicitly declared attributes, Post instances will also return data for any key found in the json\n data used for instantiation.\n\n Attributes:\n voted (str): \"up\"/\"down\" if the user fetching the post has voted on the post. None if the user has not voted.\n vote_count (int): Signed integer indicating how many votes the post has.\n has_replies (bool): True if the post has replies, False if it does not.\n reply_from_op (bool): True if the post was made by someone replying to their own thread.\n replies (list): List of Post objects representing the replies to this post. 
Empty list if there are no replies.\n reply_count (int): The number of replies to this post.\n is_image (bool): True if the post contains an image, False if it does not.\n image_url (str): None if the post doesn't contain an image, AWS url if it does.\n thumbnail_url (str): None if the post doesn't contain an image, AWS url if it does.\n created_at (datetime): Time the post was created.\n updated_at (datetime): Time the post was last updated (seems to always be the same as created_at).\n own_post (boolean): True if the post was written by the user who fetched it, False if it was not.\n distance (int): Euclidean post distance in kilometers (very_close: 0..1, close: 2..10, city_name: 11+).\n location (dict): Dictionary mapping 'lat', 'lng' and 'name' to latitude, longitude and name.\n message (str): The contents of the post. Empty string it no message is found.\n color (str): Six character hex describing the color of the post. FFFFFF if no color is found.\n post_id (str): Alphanumeric string identifying the post.\n user_handle (str): Alphanumeric string identifying a user in the current thread.\n \"\"\"\n def __init__(self, json_dict, pydel_instance=None):\n \"\"\"\n Instantiates a Post object.\n\n Args:\n json_dict: Dictionary describing a Jodel post.\n (optional) pydel_instance: A Pydel instance used for voting/replying/deleting.\n\n Raises:\n InvalidPostException: json_dict does not describe a valid Jodel (typically, it does map post_id)\n \"\"\"\n if 'post_id' not in json_dict:\n raise InvalidPostException('Post data did not contain post_id', json_dict)\n self._json_dict = json_dict\n self._pydel_instance = pydel_instance\n\n def upvote(self):\n \"\"\"\n Upvotes this post using the Pydel instance given in the constructor.\n\n Returns:\n False if the currently logged in user has already voted on this post, True if the vote was successful.\n\n Raises:\n AuthenticationError: An attempt to replace an outdated auth token failed.\n UnexpectedResponseCodeException: The server responded with an unexpected HTTP status code (that is, not 200 or 204).\n NoPydelInstanceException: This Post instance was not instantiated with a Pydel instance.\n \"\"\"\n if self._pydel_instance is not None:\n return self._pydel_instance.upvote_post(self)\n else:\n raise NoPydelInstanceException()\n\n def downvote(self):\n \"\"\"\n Downvotes this post using the Pydel instance given in the constructor.\n\n Returns:\n False if the currently logged in user has already voted on this post, True if the vote was successful.\n\n Raises:\n AuthenticationError: An attempt to replace an outdated auth token failed.\n UnexpectedResponseCodeException: The server responded with an unexpected HTTP status code (that is, not 200 or 204).\n NoPydelInstanceException: This Post instance was not instantiated with a Pydel instance.\n \"\"\"\n if self._pydel_instance is not None:\n return self._pydel_instance.downvote_post(self)\n else:\n raise NoPydelInstanceException()\n\n def reply(self, message):\n \"\"\"\n Replies to this post using the Pydel instance given in the constructor.\n\n Args:\n message: Post message\n\n Returns:\n List of Post objects containing the newest posts near the current position.\n\n Raises:\n AuthenticationError: An attempt to replace an outdated auth token failed.\n UnexpectedResponseCodeException: The server responded with an unexpected HTTP status code (that is, not 200 or 204)\n NoPydelInstanceException: This Post instance was not instantiated with a Pydel instance.\n \"\"\"\n if self._pydel_instance is not 
None:\n return self._pydel_instance.new_reply(message, self)\n\n else:\n raise NoPydelInstanceException()\n\n def delete(self):\n \"\"\"\n Deletes this post using the Pydel instance given in the constructor.\n\n Returns:\n True if the deletion request was successfully sent.\n\n Raises:\n AuthenticationError: An attempt to replace an outdated auth token failed.\n UnexpectedResponseCodeException: The server responded with an unexpected HTTP status code (that is, not 200 or 204)\n NoPydelInstanceException: This Post instance was not instantiated with a Pydel instance.\n UnauthorizedDeletionException: The Pydel instance associated with this Post object does not own the post.\n \"\"\"\n if not self.own_post:\n raise UnauthorizedDeletionException(self.post_id)\n\n elif self._pydel_instance is None:\n raise NoPydelInstanceException\n\n else:\n return self._pydel_instance.delete_post(self)\n\n @property\n def voted(self):\n if 'voted' in self._json_dict:\n return self._json_dict['voted']\n else:\n return None\n\n @property\n def vote_count(self):\n return self._json_dict['vote_count']\n\n @property\n def has_replies(self):\n return 'child_count' in self._json_dict and self._json_dict['child_count'] != 0\n\n @property\n def reply_from_op(self):\n if 'parent_creator' not in self._json_dict:\n return False\n else:\n return self._json_dict['parent_creator'] == 1\n\n @property\n def replies(self):\n if self.has_replies:\n return generate_post_list(self._json_dict['children'], self._pydel_instance)\n else:\n return []\n\n @property\n def reply_count(self):\n if 'child_count' in self._json_dict:\n return self._json_dict['child_count']\n else:\n return 0\n\n @property\n def is_image(self):\n return 'image_url' in self._json_dict\n\n @property\n def image_url(self):\n if 'image_url' in self._json_dict:\n return self._json_dict['image_url']\n else:\n return None\n\n @property\n def thumbnail_url(self):\n if 'thumbnail_url' in self._json_dict:\n return self._json_dict['thumbnail_url']\n else:\n return None\n\n @property\n def created_at(self):\n return utils.iso8601_to_datetime(self._json_dict['created_at'])\n\n @property\n def updated_at(self):\n return utils.iso8601_to_datetime(self._json_dict['updated_at'])\n\n @property\n def own_post(self):\n return self._json_dict['post_own'] == 'own'\n\n @property\n def distance(self):\n return self._json_dict['distance']\n\n @property\n def location(self):\n location = self._json_dict['location']\n return {\n 'lat': location['loc_coordinates']['lat'],\n 'lng': location['loc_coordinates']['lng'],\n 'name': location['name']\n }\n\n @property\n def message(self):\n if 'message' in self._json_dict:\n return self._json_dict['message']\n else:\n return ''\n\n @property\n def color(self):\n if 'color' in self._json_dict:\n return self._json_dict['color']\n else:\n return \"FFFFFF\"\n\n @property\n def post_id(self):\n return self._json_dict['post_id']\n\n @property\n def user_handle(self):\n return self._json_dict['user_handle']\n\n def __getattr__(self, key):\n if key in self._json_dict:\n return self._json_dict[key]\n else:\n raise AttributeError\n\n\ndef generate_post_list(json_data, pydel_instance):\n return [Post(p, pydel_instance) for p in json_data]\n" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.6538461446762085, "avg_line_length": 16.33333396911621, "blob_id": "8536ee7dd7d36b3de6104c3986641782efe63131", "content_id": "176efe3b1d628d79ddc348c231bcdd233bcdc619", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 104, "license_type": "permissive", "max_line_length": 19, "num_lines": 6, "path": "/pydel/colors.py", "repo_name": "novag/pydel", "src_encoding": "UTF-8", "text": "ORANGE = 'FF9908'\nYELLOW = 'FFBA00'\nRED = 'DD5F5F'\nBLUE = '06A3CB'\nBLUEGREY = '8ABDB0'\nGREEN = '9EC41C'\n" }, { "alpha_fraction": 0.6328358054161072, "alphanum_fraction": 0.6985074877738953, "avg_line_length": 19.9375, "blob_id": "bb5bf12917842f9b53b30a4b5295040d9fdca1b3", "content_id": "10204767249c79df957be0214854fbc6e07822e3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 335, "license_type": "permissive", "max_line_length": 72, "num_lines": 16, "path": "/pydel/utils.py", "repo_name": "novag/pydel", "src_encoding": "UTF-8", "text": "import hashlib\nimport datetime\nimport uuid\nimport random\n\n\ndef random_device_uid():\n return hashlib.sha256(str(uuid.uuid4()).encode('UTF-8')).hexdigest()\n\n\ndef random_loc_accuracy():\n return random.uniform(1.0, 15.0)\n\n\ndef iso8601_to_datetime(iso8601):\n return datetime.datetime.strptime(iso8601, \"%Y-%m-%dT%H:%M:%S.%fZ\")\n" }, { "alpha_fraction": 0.5789473652839661, "alphanum_fraction": 0.7368420958518982, "avg_line_length": 18.5, "blob_id": "6c4098ccd78deb766b151bce907c782d771452f4", "content_id": "a003ce17770b5cfd8de9d5c517d794e903672f53", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 38, "license_type": "permissive", "max_line_length": 22, "num_lines": 2, "path": "/requirements.txt", "repo_name": "novag/pydel", "src_encoding": "UTF-8", "text": "ndg-httpsclient==0.4.0\nrequests==2.9.0" } ]
num_files: 7
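
A detail worth noting in the pydel row above: `Post.__getattr__` falls back to the post's `_json_dict`, so any key in the server payload is readable as an attribute even when no explicit property exists for it. A stripped-down illustration of the pattern (the `Record` class and sample keys below are made up for demonstration, not part of pydel):

```python
# Illustration of the dict-backed attribute fallback used by pydel's Post.
class Record:
    def __init__(self, json_dict):
        self._json_dict = json_dict

    def __getattr__(self, key):
        # Called only when normal attribute lookup fails.
        if key in self._json_dict:
            return self._json_dict[key]
        raise AttributeError(key)

r = Record({"post_id": "abc123", "pin_count": 4})
print(r.post_id)    # "abc123"
print(r.pin_count)  # 4 -- no explicit property needed
```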

repo_name: COSC481W-2020Fall/cosc481w-581-2020-fall-stattracker
repo_url: https://github.com/COSC481W-2020Fall/cosc481w-581-2020-fall-stattracker
snapshot_id: 89eae323f9625d51bb04e818ab67eebb0219c9cb
revision_id: 72d08035d11a935f4998b772853f9b218b112900
directory_id: cea9c0f84144ed6d0e4915f48d2d9e0fc2a936f7
branch_name: refs/heads/master
visit_date: 2023-06-09T04:37:53.813984
revision_date: 2020-12-16T15:11:43
committer_date: 2020-12-16T15:11:43
github_id: 293622016
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: 2020-09-07T20:07:14
gha_updated_at: 2020-12-07T00:02:46
gha_pushed_at: 2020-12-07T18:12:42
gha_language: Python
files:
[ { "alpha_fraction": 0.7093648910522461, "alphanum_fraction": 0.7222820520401001, "avg_line_length": 25.457143783569336, "blob_id": "f30c92af7466e0b09b5c74ba4ffb947953828ca4", "content_id": "41e767e2f0be79c19880c436b3c9e164a6b71858", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 929, "license_type": "no_license", "max_line_length": 100, "num_lines": 35, "path": "/unittestdom.py", "repo_name": "COSC481W-2020Fall/cosc481w-581-2020-fall-stattracker", "src_encoding": "UTF-8", "text": "import unittest\nimport sqlalchemy\nimport sqlite3\nfrom utils import addGameDB, get_champs\n\nclass AppendUnitTest(unittest.TestCase):\n\n\tdef test_appendDeck(self):\n\t\tactual = []\n\t\texpected = ['W', 'Ionia / Targon', 'Lee Sin']\n\t\tdeck = addGameDB('CIBQCAIBA4AQEAICBMBAMBIIBMGREFA4EARC2OQAAEAQGBQO',expected)\n\n\n\t\tconnection = sqlite3.connect('card_data/stattracker.db')\n\t\tc = connection.cursor()\n\t\twith connection:\n\t\t\tc.execute(\"\"\"SELECT * FROM CIBQCAIBA4AQEAICBMBAMBIIBMGREFA4EARC2OQAAEAQGBQO\"\"\")\n\t\t\trecords = c.fetchall()\n\n\t\tfor x in records:\n\t\t\tactual.append(x)\n\t\tself.assertEqual(actual, expected)\n\n\t\twith connection:\n\t\t\t\tc.execute(\"DELETE FROM CIBQCAIBA4AQEAICBMBAMBIIBMGREFA4EARC2OQAAEAQGBQO ORDER BY DESC LIMIT 1\")\n\n\tdef test_Champs(self):\n\t\tdata = get_dataframe()\n\treturn data[data['rarity'] == 'Champion']['name'].to_list()\n\tget_champs()\n\tfor x,y(data):\n\t\tself.assertEqual(x,y)\n\nif __name__ == '__main__':\n\tunittest.main()\n\n\t\t" }, { "alpha_fraction": 0.6869391798973083, "alphanum_fraction": 0.6909272074699402, "avg_line_length": 24.743589401245117, "blob_id": "77df4434c327aa569f2f7a0ec55e121942ae8604", "content_id": "18617118a132ed3654f8e3fa163c4f3c3ed9c878", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1003, "license_type": "no_license", "max_line_length": 60, "num_lines": 39, "path": "/unittesttristan.py", "repo_name": "COSC481W-2020Fall/cosc481w-581-2020-fall-stattracker", "src_encoding": "UTF-8", "text": "import unittest\nfrom utils import buildFromCode\nfrom lor_deckcodes import LoRDeck, CardCodeAndCount\nfrom utils import get_dataframe\n\nclass DeckBuilderTests(unittest.TestCase):\n\t\"\"\"docstring for DeckBuilderTests\"\"\"\n\n\tdef test_get_dataframe(self):\n\t\tdata = get_dataframe()\n\t\tself.assertEqual(len(data), 601)\n\t\tmax_num_copies = data['name'].value_counts().max()\n\t\tself.assertEqual(max_num_copies, 1)\n\t\t\n\tdef test_buildFromCode(self):\n\t\tdeck = get_dataframe()\n\t\t\n\t\ttestDeck = deck['cardCode'].value_counts().to_dict()\n\t\tf = lambda key, value: str(value) + ':' + key\n\n\t\tcardList = []\n\t\tfor key, value in zip(testDeck.keys(), testDeck.values()):\n\t\t\tcardList.append(f(key, value))\n\n\t\ttestDeck = LoRDeck(cardList)\n\t\tcode = testDeck.encode()\n\t\ttestDataframe = buildFromCode(code)\n\n\t\tvalid = deck['cardCode'].unique().tolist()\n\t\ttest = testDataframe['cardCode'].unique().tolist()\n\n\t\tvalid.sort()\n\t\ttest.sort()\n\n\t\tfor x, y in zip(valid, test):\n\t\t\tself.assertEqual(x, y)\n\nif __name__ == '__main__':\n\tunittest.main()" }, { "alpha_fraction": 0.6722378134727478, "alphanum_fraction": 0.6768229603767395, "avg_line_length": 28.65350914001465, "blob_id": "98a0e3a3257ec5ad30cd5e41116895e1804b5236", "content_id": "0dcf820c6841c514fc220f2c2590088d46e529f1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 
6761, "license_type": "no_license", "max_line_length": 92, "num_lines": 228, "path": "/main.py", "repo_name": "COSC481W-2020Fall/cosc481w-581-2020-fall-stattracker", "src_encoding": "UTF-8", "text": "from flask import Flask, render_template, request, redirect, url_for, flash\nfrom utils import *\nfrom gallery import g_home\nfrom statsviewer import sv_home\nimport pandas as pd\nimport glob\nimport os\nimport numpy as np\nimport sys\n\napp = Flask(__name__)\napp.secret_key = os.urandom(24)\n\n@app.route('/home', methods =[\"GET\",\"POST\"])\ndef Home():\n\tif request.method == \"POST\":\n\t\tif request.form.get(\"Deck_Builder\"):## Sends you to the Deck Builder Page\n\t\t\treturn redirect(\"deckbuilder\")\n\t\tif request.form.get(\"Card_Gallery\"):## Sends you to the Card Gallery Page\n\t\t\treturn redirect(\"gallery\")\n\t\tif request.form.get(\"dataInputPage\"):## Sends you to the Data Input Page\n\t\t\treturn redirect(\"datainput\")\n\telse:\n\t\treturn render_template(\"Home.html\")\n## Start code for dataInputPage\ndecks = glob.glob('decks/*.csv')\nchamps = get_champs()\nchamps.append('None')\n\n@app.route('/datainput', methods=['GET','POST'])\ndef data_input():\n\tif request.method == 'POST':\n\t\tcode = request.form['cardCode']\n\t\tif code == \"\":\n\t\t\tflash(\"Deckcode can not be empty!\")\n\t\t\treturn redirect(request.url)\n\t\telse:\n\t\t\tisValid = False\n\t\t\ttry:\n\t\t\t\tdeck = LoRDeck.from_deckcode(code)\n\t\t\t\tisValid = True\n\t\t\texcept:\n\t\t\t\tpass\n\t\tprint(isValid)\n\t\toutcome = request.form['wl']\n\t\tregions = request.form['region1']\n\t\tif request.form['region'] not in regions:\n\t\t\tregions += \" / \" + request.form['region']\n\t\t# Adding the champions\n\t\tstrCh = \" \"\n\t\ttheChamps = \" \"\n\t\tfor key in request.form.keys():\n\t\t\tif key.startswith(\"champ\"):\n\t\t\t\tif len(theChamps) == 1:\n\t\t\t\t\ttheChamps = request.form[key]\n\t\t\t\telif request.form[key] not in theChamps:\n\t\t\t\t\ttheChamps += \" / \" + request.form[key]\n\t\tstats = [outcome, regions, theChamps]\n\t\taddGameDB(code, stats)\n\t\tif isValid:\n\t\t\tif outcome == \"Win\":\n\t\t\t\tflash(\"Congratulations on your victory!\")\n\t\t\t\treturn redirect(request.url)\n\t\t\telse:\n\t\t\t\tflash(\"Better luck next time...\")\n\t\t\t\treturn redirect(request.url)\n\t\telse:\n\t\t\tflash(\"Invalid Deckcode!\")\n\t\t\treturn redirect(request.url)\n\treturn render_template(\n\t\t'dataInputPage/dataInputPage.html',\n\t\tdecks=decks,\n\t\tchamps=champs)\n\n\n## Start code for deckbuilder\ndecks = glob.glob('decks/*.csv')\ndataFrame = get_dataframe()\ndataFrame = dataFrame.sort_values(['region','rarity'], ignore_index = True)\ncardList = dataFrame.to_html()\n\n@app.route('/deckbuilder', methods=['GET','POST'])\ndef deck_builder():\n\n\tif request.method == 'POST': ## Gets some input from page\n\n\t\tif request.form.get('Home'): ## Redirect to home page\n\t\t\treturn redirect('/')\n\n\t\t## Create new deck from code\n\t\tif request.form.get('fromCode'):\n\t\t\tdeckName = request.form['deckName']\n\t\t\tcode = request.form['deckCode']\n\t\t\tactiveDeck = buildFromCode(code)\n\t\t\tpath = f'decks/{deckName}.csv'\n\t\t\tactiveDeck.to_csv(path, index=False)\n\t\t\tif path not in decks:\n\t\t\t\tdecks.append(path)\n\t\t\t\tactiveDeck = activeDeck.to_html()\n\t\t\telif path in decks or not deckName:\n\t\t\t\tactiveDeck = None\n\n\t\t## Create new empty deck\n\t\tif not request.form.get('fromCode') and request.form.get('deckName'):\n\t\t\tdeckName = request.form['deckName']\n\t\t\t## Save 
deck and add to list of available decks\n\t\t\tcolumns = dataFrame.columns\n\t\t\tactiveDeck = pd.DataFrame(columns=dataFrame.columns)\n\t\t\tactiveDeck['count'] = None\n\t\t\tpath = f'decks/{deckName}.csv'\n\t\t\tactiveDeck.to_csv(path, index=False)\n\t\t\tif path not in decks:\n\t\t\t\tdecks.append(path)\n\t\t\t\tactiveDeck = activeDeck.to_html()\n\t\t\telif path in decks or not deckName:\n\t\t\t\tactiveDeck = None\n\n\t\t## Select deck to view\n\t\tif request.form.get('actions') == 'viewDeck':\n\t\t\tdeckName = request.form['selectDeck']\n\t\t\tactiveDeck = pd.read_csv(deckName)\n\t\t\tactiveDeck = activeDeck.to_html()\n\n\t\t## Deletes deck\n\t\tif request.form.get('actions') == 'deleteDeck':\n\t\t\tdeckName = request.form['selectDeck']\n\t\t\tos.remove(deckName)\n\t\t\tif deckName in decks:\n\t\t\t\tdecks.remove(deckName)\n\t\t\tdeckName = None\n\t\t\tactiveDeck = None\n\n\t\t## Add card to deck\n\t\tif request.form.get('actions') == 'addCard': ## Receive card id to add to deck\n\t\t\tcardID = request.form['cardID']\n\t\t\trow = dataFrame.loc[dataFrame['cardCode'] == cardID]\n\t\t\trow = row.reset_index(drop=True)\n\t\t\trow.loc[0, 'count'] = 1\n\t\t\tisChamp = row.iloc[0]['rarity'] == 'Champion'\n\n\t\t\tdeckName = request.form['selectDeck']\n\t\t\t## Write row to specified deck database\n\t\t\tactiveDeck = pd.read_csv(deckName)\n\n\t\t\tif len(activeDeck) > 0:\n\t\t\t\trarity = activeDeck['rarity'].value_counts()\n\t\t\t\tmask = (activeDeck['rarity'] == 'Champion')\n\t\t\t\tnumChampions = sum(mask * activeDeck['count'])\n\n\t\t\t\tif cardID in list(activeDeck['cardCode']):\n\t\t\t\t\tnumCopies = activeDeck.loc[activeDeck['cardCode'] == cardID, 'count'].item()\n\t\t\t\telse:\n\t\t\t\t\tnumCopies = 0\n\t\t\telse:\n\t\t\t\tnumCopies = 0\n\t\t\t\tnumChampions = 0\n\n\t\t\tisValid = False\n\t\t\tif cardID in list(dataFrame['cardCode']):\n\t\t\t\tisValid = True\n\n\t\t\tif isValid:\n\t\t\t\tif numCopies == 0:\n\t\t\t\t\tactiveDeck = activeDeck.append(row, ignore_index=True)\n\t\t\t\telif isChamp and numCopies < 3 and numChampions < 5 and len(activeDeck) < 40:\n\t\t\t\t\tactiveDeck.loc[activeDeck['cardCode'] == cardID, 'count'] += 1\n\t\t\t\telif not isChamp and numCopies < 3 and len(activeDeck) < 40:\n\t\t\t\t\tactiveDeck.loc[activeDeck['cardCode'] == cardID, 'count'] += 1\n\n\t\t\tactiveDeck['count'] = activeDeck['count'].astype(int)\n\t\t\tactiveDeck.to_csv(deckName, index=False)\n\t\t\tactiveDeck = activeDeck.to_html()\n\n\t\t## Delete card\n\t\tif request.form.get('actions') == 'deleteCard':\n\t\t\tcardIDDelete = request.form['cardID']\n\t\t\tdeckName = request.form['selectDeck']\n\t\t\tactiveDeck = pd.read_csv(deckName)\n\n\t\t\tif cardIDDelete in list(activeDeck['cardCode']):\n\t\t\t\tnumCopies = activeDeck.loc[activeDeck['cardCode'] == cardIDDelete, 'count'].item()\n\t\t\t\tif numCopies > 1:\n\t\t\t\t\tactiveDeck.loc[activeDeck['cardCode'] == cardIDDelete, 'count'] -= 1\n\t\t\t\telse:\n\t\t\t\t\tactiveDeck.drop(activeDeck[activeDeck['cardCode'] == cardIDDelete].index, inplace=True)\n\n\t\t\tactiveDeck.to_csv(deckName, index=False)\n\t\t\tactiveDeck = activeDeck.to_html()\n\n\t\t## Export deck to code\n\t\tif request.form.get('actions') == 'exportDeck':\n\t\t\tdeckName = request.form['selectDeck']\n\t\t\tactiveDeck = pd.read_csv(deckName)\n\t\t\tcode = exportCode(activeDeck)\n\t\t\tactiveDeck = activeDeck.to_html()\n\t\t\tprint(code)\n\t\t\tflash(code)\n\n\n\t\t## Renders webpage after post request\n\t\treturn 
render_template(\n\t\t\t'deckbuilder/deckbuilder.html',\n\t\t\tdeckName=deckName,\n\t\t\tcardList=cardList,\n\t\t\tactiveDeck=activeDeck,\n\t\t\tdecks=decks)\n\n\t## Initial page request\n\telif request.method == 'GET':\n\t\treturn render_template(\n\t\t\t'deckbuilder/deckbuilder.html',\n\t\t\tdeckName=None,\n\t\t\tcardList=cardList,\n\t\t\tactiveDeck=None,\n\t\t\tdecks=decks)\n## End code for deckbuilder\n\n@app.route('/gallery')\n@app.route('/gallery/<cardcode>')\ndef gallery(cardcode = None):\n\treturn g_home(cardcode)\n\n@app.route('/statsviewer')\ndef statsviewer():\n\treturn sv_home()\n\nif __name__ == '__main__':\n\tapp.run(host = \"0.0.0.0\", port = 5000, debug = True)\n" }, { "alpha_fraction": 0.7523163557052612, "alphanum_fraction": 0.7554599642753601, "avg_line_length": 42.17142868041992, "blob_id": "4bbfbce1e6419d01955d71e587ae6d9658d5d96d", "content_id": "01507edb717d165e9952846eaf7fa3ada60a2317", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 6090, "license_type": "no_license", "max_line_length": 591, "num_lines": 140, "path": "/README.md", "repo_name": "COSC481W-2020Fall/cosc481w-581-2020-fall-stattracker", "src_encoding": "UTF-8", "text": "# Legends of Runeterra StatTracker\n\nA program for keeping track of the popularity of cards.\n\nTo activate the env on unbuntu use: source env/bin/activate\n\n<h2 align=\"center\">Prototype Description</h2>\n\n- A user interface for the user to choose their options\n\n\t- Building and naming new decks\n\n\t\t- The user can build custom decks.\n\n\t\t\t- Users will add cards to the deck one at a time from a library of cards available from the [API](https://developer.riotgames.com/docs/lor).\n\n\t\t\t- The program will keep track of deck size and space left.\n\n\t\t\t- The program will monitor the deck for any errors with its structure including validity and size and will notify the user if errors become apparent.\n\n\t\t\t- The program will not let the user add cards that are out of the parameters of legal deck building.\n\n\t\t\t- The user can cancel to stop building their deck\n\n\t\t\t- The user can confirm the deck to save it.\n\n\t\t- Decks will be viewable in an inventory page for the user to browse and explore.\n\n\t\t\t- Existing decks can be edited to add or remove cards.\n\n\t\t\t- Decks can be deleted.\n\n\t- Save/restore deck(s)\n\n\t\t- The user can store their decks with reference to each card's information, and then restore the decks for later use\n\n\t- View stats\n\n\t\t- The user can choose a card from the API and view it's information/stats\n\n- Database to hold card information\n\n\t- CSV\n\n\t- Holds card information that is valuable to the user\n\n\t- Database structured to make it easy to see which cards are more popular to use and what cards work well together\n\n- Web interface\n\n\t- Built with flask\n\n\t- Simple to use without having to download any software\n\n\t- Accessible with any major web browser that the user has\n\n\t- 4 main pages:\n\n\t\t- Building a deck\n\n\t\t- Importing/exporting decks\n\n\t\t- Browsing card information\n\n\t\t- Viewing information/stats\n\n- Card information\n\n\t- Name of the card\n\n\t- Costs\n\n\t- 'Stats'\n\n\t- Ratio of use in gameplay\n\n\t- win/loss percentage\n\n\t- Common cards used together\n\n<h2 align=\"center\">Use Case Diagram</h2>\n\n![use case diagram](use-case-diagram.png)\n\n<h2 align=\"center\">Use Case Diagram (Database)</h2>\n\n![use case diagram (database)](use-case-diagram-database.png)\n\n<h4 
style = \"text-align: center;\">Potential Plan for Transferring to a Database:</h4>\n\nThe import package used in python is SQLAlchemy, part of flask. \n\nThis allows us to create and refer to a database (outlined below) created through SQLite. SQLite is part of python 3 (and above) - eliminating the need to download extra software; the lite version eliminates the need for servers (and etc...). Instead of creating the database from scratch, we will be able transfer our already existing csv files to the database. \nBoth creating the database and importing the csv files will be done through python. Currently, our deck building page creates decks as csv files - this makes translating them to the database with our current code simpler. Potentially, we can alter the code later to link to the database, so the decks are directly added upon creation. \n\nThe database will have a ‘master list’ that contains every card in Runeterra’s API (that we’ve collected thus far) and contains the following: \n\n- Card code\n- Number of wins\n- Number of losses \n- Pending others?\n\nThe database will also refer to the decks created by a user with an identical layout - with one exception (explained below). There potentially will be an added column for 'WinRate'. The database will be accessed through the deckbuilder page. It will be used to keep track of how many wins and losses each deck has, marking which cards were used - incrementing their count accordingly. This ratio of wins/losses per card will then be transferred to the ‘master list’. Perhaps this transfer can be done after a ‘gaming session’ is completed - making it easier to record data as the user plays?\n\n<u> Suggestion(s) for implementing this: </u>\n\n- The user can increment wins/losses for each card, added directly to the ‘master list’\n- The user can incremenet wins/losses for an entire deck... in the ‘current deck’:\n\t- Toggle option: (next to each card) Tells the system that what was played in game\n\t- Toggle option: (below table) Tells the system if the user won\n\t- Button: Tells the system that a game was completed\n\t\t- [ACTION] Store the data/stats derived from game directly to ‘current deck’s’ stats list\n\t- Button: Tells the system that the user has completed a game session\n\t\t- [ACTION] Transfers ‘current deck’s’ wins/losses to ‘master list’\n\t\t- [ACTION] Resets wins/losses columns to 0\n\n(I'm a visual person, so in case it helps, here's an example of what I kind of had in mind:)\n\n![win/losses example](wins_lossesExample.jpg)\n\n** The only reason I didn't draw in the other details that are already part of the deckbuilder page was just to keep it simple :) **\n\nPotential problem: The win rate wouldn’t be stored for each individual deck. We could add a column titled ‘WinRate’ unique to the built decks and stored the \npercentage of wins/games played - which is ignored during the transfer to the 'master list'. 
This would then add one more [ACTION] after 'game session' is completed: calculating the percentage of wins versus games played and applying it to the 'winrate' column (or there could be four columns: Wins, Losses, Total wins, Total losses).\n\nPending: implementation to determine which cards work best with which.\n\n<h4 style = \"text-align: center;\">Resources for reference:</h4>\n\n$ pip install sqlalchemy\n\n<a href=\"https://www.youtube.com/watch?v=Z1RJmh_OqeA&ab_channel=freeCodeCamp.org\"> Flask Tutorial (around 15:00 for database) </a>\n\n<a href=\"https://www.youtube.com/watch?v=o-vsdfCBpsU&ab_channel=sentdex\"> Create SQLite (Video) </a>\n\n<a href=\"https://www.sqlitetutorial.net/sqlite-cheat-sheet/\"> SQLite Cheat Sheet </a>\n\n<a href=\"https://www.kite.com/python/answers/how-to-insert-the-contents-of-a-csv-file-into-an-sqlite3-database-in-python\"> Adding CSV to Database </a>\n\n<a href=\"https://www.tutorialspoint.com/sqlalchemy/sqlalchemy_core_sql_expressions.htm\"> SQLAlchemy </a>\n" }, { "alpha_fraction": 0.6871360540390015, "alphanum_fraction": 0.6998412013053894, "avg_line_length": 25.619718551635742, "blob_id": "b1c4d55af5aaf09d3833b980c1ea2f49a6e6e871", "content_id": "0b8f8c49d5f7535520b2b72ad1ce52c29a4a4220", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1889, "license_type": "no_license", "max_line_length": 107, "num_lines": 71, "path": "/statsviewer.py", "repo_name": "COSC481W-2020Fall/cosc481w-581-2020-fall-stattracker", "src_encoding": "UTF-8", "text": "from flask import render_template\nimport sqlite3\nimport pandas as pd\nfrom lor_deckcodes import LoRDeck, CardCodeAndCount\n\ndef sv_home():\n\tavoidTables = ['set', 'API']\n\n\t# get dataframe of db\n\tcnx = sqlite3.connect(\"card_data/stattracker.db\")\n\tdf = pd.read_sql_query(\"select name from sqlite_master where type = 'table'\", cnx)\n\n\t# extract deck names\n\tdeckNames = []\n\tfor index, row in df.iterrows():\n\t\tif row['name'] not in avoidTables:\n\t\t\tdeckNames.append(row['name'])\n\t\n\tframesToConcat = []\n\tfor i in range(len(deckNames)):\n\n\t\t# get stats for deck\n\t\tunnamedDf = pd.read_sql_query(\"select * from \" + deckNames[i], cnx)\n\n\t\t# add deck name to data\n\t\t# https://stackoverflow.com/a/53236864/13157180\n\t\tnameCol = []\n\t\tfor j in range(len(unnamedDf.index)):\n\t\t\tnameCol.append(deckNames[i])\n\t\tnameDf = pd.DataFrame(nameCol, columns=['Deck Name'])\n\t\tunnamedDf.append(nameDf)\n\n\t\t# add frames to array\n\t\tframesToConcat.append(pd.concat([unnamedDf, nameDf], axis=1))\n\n\t# make df out of array\n\tdeckData = pd.concat(framesToConcat)\n\tdeckData.reset_index(drop=True, inplace=True) # reset indexes\n\n\t# make df of cards in each deck\n\tCardDataCardCode = []\n\tCardDataDeckCode = []\n\tfor deckName in deckNames:\n\t\ttry:\n\t\t\tdeck = LoRDeck.from_deckcode(deckName)\n\t\t\tfor card in deck.cards:\n\t\t\t\tCardDataCardCode.append(str(card)[2:]) # trim quantity\n\t\t\t\tCardDataDeckCode.append(deckName)\n\t\texcept ValueError:\n\t\t\t# Invalid deck code, skip\n\t\t\tpass\n\tcardData = pd.DataFrame(data={'Deck Name':CardDataDeckCode, 'Card Code':CardDataCardCode})\n\n\treturn render_template('statsviewer/index.html', deckData=deckData.to_json(), cardData=cardData.to_json())\n\ndef count_wins(df):\n\tcount = 0\n\tfor row in df['Win/Loss']:\n\t\tif row == 'W':\n\t\t\tcount += 1\n\treturn count\n\ndef count_losses(df):\n\tcount = 0\n\tfor row in df['Win/Loss']:\n\t\tif row == 'L':\n\t\t\tcount += 
1\n\treturn count\n\ndef win_avg(df):\n\treturn count_wins(df) / len(df.index)" }, { "alpha_fraction": 0.630602240562439, "alphanum_fraction": 0.6390056014060974, "avg_line_length": 36.09090805053711, "blob_id": "552002acc60679659db5fafcfc4b4926742e45f4", "content_id": "c7144b04c2064dd46c9f8b9ff88dfa6b0a5d4b70", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2856, "license_type": "no_license", "max_line_length": 110, "num_lines": 77, "path": "/unittestKris.py", "repo_name": "COSC481W-2020Fall/cosc481w-581-2020-fall-stattracker", "src_encoding": "UTF-8", "text": "import unittest\nimport sqlalchemy\nimport sqlite3\nfrom utils import getDeck, createUser, addUserDeck\n\nclass databaseTests(unittest.TestCase):\n    def test_addBlank(self):\n        deck = getDeck('Test') # should create test deck, adds a game\n\n        actual = []\n        expected = [\"Win/Loss\", \"Opponent Regions\", \"Opponent Champs\"]\n\n        # establishing connection\n        connection = sqlite3.connect('card_data/stattracker.db')\n        c = connection.cursor()\n        with connection:\n            check = c.execute(\"PRAGMA TABLE_INFO(Test)\")\n            for x in check:\n                actual.append(x[1])\n        self.assertEqual(actual, expected)\n\n        # Deletes test deck when done\n        with connection:\n            c.execute(\"DROP TABLE Test\")\n\n    def test_addUser(self):\n        # if user exists - return True\n        # if user does not exist - return False and add to db\n        testUser = createUser('test') # create fake user that doesn't exist\n        testUser2 = createUser('ryoumi') # try to create user that does exist\n\n        # hardcoding the test\n        self.assertEqual(testUser, False)\n\n        # delete fake user\n        connection = sqlite3.connect('card_data/usersdecks.db')\n        c = connection.cursor()\n        with connection:\n            c.execute(\"DROP TABLE test\")\n\n        self.assertEqual(testUser2, True)\n\n    def test_adduserDeck(self):\n        testUser = createUser('testDeck') # creating fake user\n\n        stats = [\"Generic deck name\", \"CIBAIAYGAQDQQDYIAMESGKJNGM2DOPC4AEBAGCITKUAA\"]\n        addUserDeck('testDeck', stats) # 1\n        stats = [\"Second deck name\", \"CIBQCAIBA4AQEAICBMBAMBIIBMGREFA4EARC2OQAAEAQGBQO\"]\n        addUserDeck('testDeck', stats) # 2\n        stats = [\"Final deck name\", \"CICACAQEBIBACAIEEABAEAICA4CQCBAIDENTIOQCAEBACCQDAECCOMJVAIAQCAJKAEAQIGQ\"]\n        addUserDeck('testDeck', stats) # 3\n\n        # since deck code is already added under \"Second deck code\" it shouldn't add this one\n        stats = [\"Check this out\", \"CIBQCAIBA4AQEAICBMBAMBIIBMGREFA4EARC2OQAAEAQGBQO\"]\n        addUserDeck('testDeck', stats) # 4\n\n        # making sure it adds to full deck stats if not exist\n        stats = [\"Dummy test\", \"TestTest\"]\n        addUserDeck('testDeck', stats) # 5\n\n        connection = sqlite3.connect('card_data/usersdecks.db')\n        c = connection.cursor()\n        with connection:\n            c.execute(\"SELECT count(*) FROM testDeck\")\n            deckCount = c.fetchone()[0]\n\n            c.execute(\"DROP TABLE testDeck\") # deleting added decks for testing\n\n        masConnection = sqlite3.connect('card_data/stattracker.db')\n        c = masConnection.cursor()\n        with masConnection:\n            c.execute(\"DROP TABLE TestTest\") # deleting added decks for testing\n\n        self.assertEqual(deckCount, 4) # the actual test portion\n\nif __name__ == '__main__':\n    unittest.main()\n" }, { "alpha_fraction": 0.6795827150344849, "alphanum_fraction": 0.6967213153839111, "avg_line_length": 31.7560977935791, "blob_id": "a1947ba9e830325c35ea4f918e57da33947a7d5d", "content_id": "f669eda7b2c765dc49c0f47d7390c886f1d21ddb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1342, 
"license_type": "no_license", "max_line_length": 124, "num_lines": 41, "path": "/gallery.py", "repo_name": "COSC481W-2020Fall/cosc481w-581-2020-fall-stattracker", "src_encoding": "UTF-8", "text": "import json\nimport html\nimport pandas as pd\nimport glob\nfrom flask import render_template\nfrom pandas.io.json import json_normalize\n\ndef g_home(cardcode):\n\t# get data\n\t# df = pd.read_json('card_data/set1-en_us.json')\n\tfilenames = glob.glob('card_data/*.json')\n\tdfs = []\n\tfor filename in filenames:\n\t\tdfs.append(pd.read_json(filename))\n\tdf = pd.concat(dfs, ignore_index=True)\n\n\tif cardcode == None:\n\t\t# limit data to these fields\n\t\tdf = df[['name', 'cardCode']].sort_values('name')\n\t\tprint(df)\n\t\t# render page and add json data as js variable\n\t\tjsondata = json.loads(df.to_json())\n\t\treturn render_template('gallery/index.html', jsondata=jsondata)\n\n\telse:\n\t\t# limit data to these fields\n\t\tdf = df[['name', 'assets', 'description', 'cardCode', 'cost', 'type']]\n\t\t# find the row that matches the cardcode parameter\n\t\tdf = df.loc[df['cardCode'] == cardcode]\n\t\t# extract the card information\n\t\tname = df.iloc[0]['name']\n\t\timage = df.iloc[0].assets[0]['fullAbsolutePath']\n\t\tdescription = df.iloc[0].description\n\t\tcost = df.iloc[0]['cost']\n\t\tcardtype = df.iloc[0]['type']\n\t\t# render the page\n\t\treturn render_template('gallery/card.html', name=name, image=image, description=description, cost=cost, cardtype=cardtype)\n\n# https://stackoverflow.com/a/42264209/13157180\ndef make_clickable(val):\n return '<a href=\"/gallery/{}\">Link</a>'.format(val,val)" }, { "alpha_fraction": 0.6679389476776123, "alphanum_fraction": 0.6870229244232178, "avg_line_length": 27.351350784301758, "blob_id": "8c8333d748357e1d4271ecda71665d51791e6e49", "content_id": "6da91523ae2999d2d8152d77481bd32f212348a5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1048, "license_type": "no_license", "max_line_length": 103, "num_lines": 37, "path": "/tests/statsviewertest.py", "repo_name": "COSC481W-2020Fall/cosc481w-581-2020-fall-stattracker", "src_encoding": "UTF-8", "text": "# fix import pathing https://stackoverflow.com/a/16985066/13157180\nimport sys\nimport os\n\nPACKAGE_PARENT = '..'\nSCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))\nsys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT)))\n# end of fix\n\nfrom statsviewer import count_wins, count_losses, win_avg\nimport unittest\nimport pandas as pd\n\nclass TestStatsMethods(unittest.TestCase):\n\t\n\t@classmethod\n\tdef setUpClass(cls):\n\t\tdata = {\n\t\t\t'Win/Loss': ['W', 'L', 'W'],\n\t\t\t'Opponent Regions': ['Ionia / Targon', 'Noxus / Demacia', 'Freljord / Shadow Isles'],\n\t\t\t'Opponent Champs': ['Lee Sin', 'Vladimir', 'Tryndamere / Trundle']\n\t\t}\n\t\tcolumns = ['Win/Loss', 'Opponent Regions', 'Opponent Champs']\n\t\tcls.df = pd.DataFrame(data, columns)\n\t\t# print(df)\n\n\tdef test_countwins(self):\n\t\tself.assertEqual(count_wins(self.df), 2)\n\n\tdef test_countlosses(self):\n\t\tself.assertEqual(count_losses(self.df), 1)\n\n\tdef test_countavg(self):\n\t\tself.assertEqual(win_avg(self.df), 2/3)\n\nif __name__ == '__main__':\n\tunittest.main()" }, { "alpha_fraction": 0.6929637789726257, "alphanum_fraction": 0.6985849738121033, "avg_line_length": 31.452829360961914, "blob_id": "c7abdbb4ae1d04f09fc93f3c9196f537047956fb", "content_id": "18feb171fa397af071d42b9179f38d1dc855999a", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5159, "license_type": "no_license", "max_line_length": 110, "num_lines": 159, "path": "/utils.py", "repo_name": "COSC481W-2020Fall/cosc481w-581-2020-fall-stattracker", "src_encoding": "UTF-8", "text": "import json\nimport csv\nimport os\nimport pandas as pd\nimport sqlite3\nimport glob\nimport sqlalchemy\nfrom lor_deckcodes import LoRDeck, CardCodeAndCount\nimport sys\n\ndef get_dataframe():\n\tfiles = glob.glob('card_data/*.json')\n\n\tdata = []\n\tfor file in files:\n\t\tdata.append(pd.read_json(file))\n\n\tdata = pd.concat(data)\n\tdata = data[['cardCode', 'name', 'region', 'attack', 'cost', 'health', 'rarity']]\n\tfunc = lambda x: False if 'T' in x[-3:] else True\n\tmask = data['cardCode'].apply(func)\n\tdata = data[mask]\n\n\t## This will be changed to grab the wins and losses from database\n\t# data['winrate'] = 0\n\tdata = data.reset_index(drop=True)\n\treturn data\n\n# Simple 'save csv file to the database'\ndef importToDatabase(filename):\n\tfile = 'card_data/' + filename + '.csv' # CSV file to import into database\n\tconnection = sqlite3.connect('card_data/stattracker.db')\n\n\t# Reading csv file to database\n\tdata = pd.read_csv(file)\n\tdel data[\"DeckCode\"] # deletes the column with the deck code\n\tdel data[\"Unnamed: 0\"] # deletes that extra column at the beginning\n\tdata.to_sql(filename, connection, if_exists='replace', index=False)\n\tos.remove(file)\n\n# Adds a list of decks from excel file to databse\n# Changes the names of the decks to the deck codes\ndef addDeckDB(filename):\n\tfile = \"card_data/\" + filename + \".xlsx\"\n\tdeckList = pd.read_excel(file, sheet_name=None)\n\n\tfor x in deckList.keys():\n\t\ttemp = pd.read_excel(file, sheet_name=x)\n\t\tdeckName = temp.iloc[0]['DeckCode']\n\t\ttemp.to_csv('card_Data/' + deckName + '.csv', header=True)\n\t\timportToDatabase(deckName)\n\n# adds stats from a game to database\n# deckName = deck code\n# gameStates = an array with\n# - Win/Losses - W or L\n# - Opponent Regions - String (Region1 / Region2)\n# - Opponent Champs - String (Champion1 / Champion2 / Champion3 / ...)\ndef addGameDB(deckname, gameStats):\n\tconnection = sqlite3.connect('card_data/stattracker.db')\n\tc = connection.cursor()\n\n\toutcome = gameStats [0]\n\tregions = gameStats [1]\n\tchampions = gameStats [2]\n\twith connection:\n\t\tc.execute(\"CREATE TABLE IF NOT EXISTS \" + deckname + \" ('Win/Loss', 'Opponent Regions', 'Opponent Champs')\")\n\t\tc.execute(\"INSERT INTO \" + deckname + \" VALUES(?, ?, ?)\", (outcome, regions, champions))\n\n# Grabs a deck from the database and returns it\ndef getDeck(deckname):\n\tconnection = sqlite3.connect('card_data/stattracker.db')\n\tc = connection.cursor()\n\twith connection:\n\t\tc.execute(\"CREATE TABLE IF NOT EXISTS \" + deckname + \" ('Win/Loss', 'Opponent Regions', 'Opponent Champs')\")\n\t\tc.execute(\"SELECT * FROM \" + deckname)\n\t\treturn c.fetchall()\n\n# checks if user exists\n# if not adds them to database and returns true\n# if yes, returns false\ndef createUser(name):\n\t# create an connection\n\tconnection = sqlite3.connect('card_data/usersdecks.db')\n\tc = connection.cursor()\n\n\twith connection:\n\t\tc.execute(\"SELECT count(*) FROM sqlite_master WHERE type='table' AND name='\" + name + \"'\")\n\t\tif c.fetchone()[0] == 1:\n\t\t\treturn True # user already exists\n\t\tc.execute(\"CREATE TABLE IF NOT EXISTS \" + name + \" ('Deckname', 'Deckcode')\")\n\n\treturn False # returns that it 
didn't exist\n\n# user = name of user\n# stats = deckname (from user), deckcode\n# ^^ is an array so it's easier if in the future we want to save more information about the user\ndef addUserDeck(user, stats):\n\t# create connection\n\tconnection = sqlite3.connect('card_data/usersdecks.db')\n\tc = connection.cursor()\n\twith connection:\n\t\tc.execute(\"SELECT EXISTS (SELECT 1 FROM \" + user + \" WHERE Deckcode='\" + stats[1] + \"')\")\n\t\texisting = c.fetchone()[0]\n\t\tif existing:\n\t\t\treturn True\n\t\telse:\n\t\t\tc.execute(\"INSERT INTO \" + user + \" VALUES(?, ?)\", (stats[0], stats[1]))\n\t\t\tc.close()\n\n\tmastConnection = sqlite3.connect('card_data/stattracker.db')\n\tc = mastConnection.cursor()\n\tc.execute(\"CREATE TABLE IF NOT EXISTS \" + stats[1] + \" ('Win/Loss', 'Opponent Regions', 'Opponent Champs')\")\n\tc.close()\n\treturn False # used to display an error message\n\n# grabs the user's deck names and corresponding deck codes\ndef grabUsersDecks(user):\n\tconnection = sqlite3.connect('card_data/usersdecks.db')\n\tc = connection.cursor()\n\n\twith connection:\n\t\tc.execute(\"SELECT Deckname, Deckcode FROM \" + user)\n\n\treturn c.fetchall()\n\ndef get_champs():\n\tdata = get_dataframe()\n\treturn data[data['rarity'] == 'Champion']['name'].to_list()\n\n## Takes in a valid card code and returns a pandas dataframe with the deck's cards and counts\ndef buildFromCode(code):\n\tdata = get_dataframe()\n\tdeck = LoRDeck.from_deckcode(code)\n\n\t# codes = [(card.card_code, card.count) for card in deck.cards]\n\tnewDeck = pd.DataFrame(columns=data.columns)\n\n\tfor i, card in enumerate(deck.cards):\n\t\trow = data.loc[data['cardCode'] == card.card_code]\n\t\tnewDeck = newDeck.append(row, ignore_index=True)\n\t\tnewDeck.loc[i, 'count'] = int(card.count)\n\tnewDeck['count'] = newDeck['count'].astype(int)\n\treturn newDeck\n\n## Creates a code from deck\ndef exportCode(deck):\n\tcol = deck['count'].apply(int).apply(str) + ':' + deck['cardCode']\n\tdeck = LoRDeck(col.to_list())\n\treturn deck.encode()\n\nif __name__ == '__main__':\n\tdata = get_dataframe() # do not delete\n\n\t# getDeck(\"temp\")\n\n\tdeck = buildFromCode('CICACAYABYBAEBQFCYBAEAAGBEDQCAABBEFR2JJHGMBACAIACUAQEAAHAA')\n\tprint(deck)\n\tprint(exportCode(deck))" }, { "alpha_fraction": 0.46919432282447815, "alphanum_fraction": 0.6872037649154663, "avg_line_length": 15.230769157409668, "blob_id": "a5b9be99e37c24245a6aa4d8c9f02e6391017861", "content_id": "ba8e58bb0d884fc7b7183c783552e186e4a0e350", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 211, "license_type": "no_license", "max_line_length": 22, "num_lines": 13, "path": "/requirements.txt", "repo_name": "COSC481W-2020Fall/cosc481w-581-2020-fall-stattracker", "src_encoding": "UTF-8", "text": "click==7.1.2\nFlask==1.1.2\nitsdangerous==1.1.0\nJinja2==2.11.2\nlor-deckcodes==1.2.0\nMarkupSafe==1.1.1\nnumpy==1.19.2\npandas==1.1.2\npython-dateutil==2.8.1\npytz==2020.1\nsix==1.15.0\nSQLAlchemy==1.3.20\nWerkzeug==1.0.1\n" } ]
10
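The stattracker README above outlines moving the deck CSV files into SQLite with pandas, and the repo's own `importToDatabase` in utils.py already does this for a single file. Below is a minimal, self-contained sketch of that flow; the CSV path and table name are illustrative assumptions, not values from the project.

```python
# Minimal sketch of the CSV-to-SQLite import described in the README above,
# mirroring utils.py's importToDatabase. The csv path and table name are
# illustrative assumptions, not the project's actual values.
import sqlite3
import pandas as pd

def import_deck_csv(csv_path, table_name, db_path='card_data/stattracker.db'):
    # Load the deck CSV into a dataframe and persist it as a table,
    # replacing any previous version so re-imports stay idempotent.
    connection = sqlite3.connect(db_path)
    try:
        deck = pd.read_csv(csv_path)
        deck.to_sql(table_name, connection, if_exists='replace', index=False)
    finally:
        connection.close()

if __name__ == '__main__':
    import_deck_csv('card_data/example_deck.csv', 'example_deck')  # hypothetical file
```

Using `if_exists='replace'` matches the behavior of `importToDatabase` above, so re-running an import overwrites the deck's table instead of appending duplicate rows.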
br3d/ansible-freeton
https://github.com/br3d/ansible-freeton
1c78abdb473c68d104e0c630487f932ba53e9cdf
dfdc4bd97f6d7ddc0540caf8bf88e2ce115ca57f
0aa75a7476b463e0e419e55918d034b84c3cfd0a
refs/heads/master
2023-01-31T01:35:22.158051
2020-12-13T15:15:17
2020-12-13T15:15:17
266,032,349
19
8
null
null
null
null
null
[ { "alpha_fraction": 0.7435417175292969, "alphanum_fraction": 0.7536503076553345, "avg_line_length": 38.27941131591797, "blob_id": "5c5d4e7df1dc8c5ca41ccf0980b06ad7cfd979e4", "content_id": "e21210b91a63e145c58ec01278f105583f1dbc2b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2671, "license_type": "no_license", "max_line_length": 150, "num_lines": 68, "path": "/README.md", "repo_name": "br3d/ansible-freeton", "src_encoding": "UTF-8", "text": "# ansible-freeton\n\nRoles of Ansible for install and monitor FreeTon node.\n\n## System requirements\n\n- Ubuntu 18 or newest\n\n## Roles:\n\n- **common** - preparing system and install dependencies\n- **freeton** - build and setup FreeTon node\n- **netdata** - real-time monitoring\n- **prometheus-node-exporter** - exporter for hardware and OS metrics exposed, also this gives opportunity get _balance_ and _diff_ in freeton network\n\n## Functional\n\n- Freeton Install\n\n - Creating user and group\n - Cronjob for validator script\n - All logs in one folder /var/log/...\n - Systemd for control status of node and restart in fail case\n - Logrotate for archive logs\n\n- Node Monitoring\n - Install netdata for realtime status <host>/netdata\n - install prometheus-node-exporter for collect metrics\n - collecting data about node status(node diff, wallet balance, total validators, if your node became validator, open elections)\n- Install nginx for close entry poins of monitoring systems\n- Install and sync ntp server for avoid time shift\n\n* System upgrade\n\n## Installation\n\n- Pull repository\n- Add your host to `freeton` file\n- Change role for installation (common should be always)\n- Change nginx user/password for basic_auth in `vars/variables.yml`\n- Add telegram bot token and group/chat id in `vars/variables.yml`\n- Run ansible: `ansible-playbook freeton.yaml -i freeton --ask-sudo-pass`\n- Ansible Build and setup node and save seed phrase `{{ install_path }}/ton-keys/seed_phrase.secret`\n- Deploy wallet [instruction](https://docs.ton.dev/86757ecb2/v/0/p/94921e-multisignature-wallet-management-in-tonos-cli)\n- install grafana [FreeTon Validator Dashboard](https://grafana.com/grafana/dashboards/13394)\n\n## Custom metrics in prometheus-node-exporter\n\n- **ton_node_diff** - seconds until synchronization will complete\n- **ton_node_balance** - current wallet balance\n- **ton_total_validators** - number of validators\n- **ton_election_num** - election numbers\n- **ton_elections** - election status (0 - closed 1 - open)\n- **ton_aggregateBlockSignatures** - number of signed blocks by node\n- **ton_getTransactionsCount** - numbers of transaction\n- **ton_getAccountsCount** - total accounts in net.ton.dev network\n- **ton_getAccountsTotalBalance** - total balance of all accounts\n- **ton_aggregateBlocks** - blocks by current validators\n\n## Alerts\n\nBefore install grafana template (grafana\\*freeton\\*node\\*alerts.json) please replace \"\\_NODE-IP:8080\" on IP address and port of your server.\n\n## Example Dashboard based on prometheus-node-exporter\n\n![Alt text](images/dashboard.png?raw=true \"FreeTon dashboard\")\n\n![Alt text](images/dashboard2.png?raw=true \"FreeTon dashboard part2\")\n" }, { "alpha_fraction": 0.5723684430122375, "alphanum_fraction": 0.6030701994895935, "avg_line_length": 37.08333206176758, "blob_id": "31fc0e3e10a3166759a3b468b29e0ddbf04635fe", "content_id": "aaf48c35c83038443828f74b5cada8c0cc8a433e", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, 
"is_vendor": false, "language": "Shell", "length_bytes": 456, "license_type": "permissive", "max_line_length": 107, "num_lines": 12, "path": "/roles/prometheus-node-exporter/templates/node_diff.sh.j2", "repo_name": "br3d/ansible-freeton", "src_encoding": "UTF-8", "text": "#!/bin/bash -eE\nTIME_DIFF=0\n\nfor i in $(\"{{ install_path }}/{{ ton_src }}/ton/build/validator-engine-console/validator-engine-console\" \\\n -a 127.0.0.1:3030 \\\n -k \"/home/{{ ton_user }}/ton-keys/client\" \\\n -p \"/home/{{ ton_user }}/ton-keys/server.pub\" \\\n -c \"getstats\" -c \"quit\" 2>&1 | grep time | awk '{print $2}'); do\n TIME_DIFF=$((i - TIME_DIFF))\ndone\n\necho \"ton_node_diff ${TIME_DIFF}\" > {{ node_exporter_custom_metrics_folder }}/diff.prom" }, { "alpha_fraction": 0.5634958744049072, "alphanum_fraction": 0.6180589199066162, "avg_line_length": 65.83870697021484, "blob_id": "d02861cdedc9e8d6a89cf6077b62a7310e08f434", "content_id": "03304a4585f19694f3b4d4ac25fcf3fddc3310a5", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2071, "license_type": "permissive", "max_line_length": 135, "num_lines": 31, "path": "/roles/prometheus-node-exporter/templates/node_balance.sh.j2", "repo_name": "br3d/ansible-freeton", "src_encoding": "UTF-8", "text": "#!/bin/bash -eE\nMSIG_ADDR=`cat /home/{{ ton_user }}/ton-keys/$HOSTNAME.addr`\nHELPER_ADDR=`cat /home/{{ ton_user }}/ton-keys/helper.addr`\nDEPOOL_ADDR=`cat /home/{{ ton_user }}/ton-keys/depool.addr`\nPROXY0_ADDR=`cat /home/{{ ton_user }}/ton-keys/proxy0.addr`\nPROXY1_ADDR=`cat /home/{{ ton_user }}/ton-keys/proxy1.addr`\n\necho \"ton_node_msig_balance $((`\"{{ install_path }}/{{ ton_src }}/ton/build/lite-client/lite-client\" \\\n -p \"/home/{{ ton_user }}/ton-keys/liteserver.pub\" \\\n -a 127.0.0.1:3031 -rc \"getaccount $MSIG_ADDR\" \\\n -rc \"quit\" | sed -n -e 's/^.*balance is //p' | head -c-3` / 1000000000))\" > {{ node_exporter_custom_metrics_folder }}/balance.prom\n\necho \"ton_node_depool_balance $((`\"{{ install_path }}/{{ ton_src }}/ton/build/lite-client/lite-client\" \\\n -p \"/home/{{ ton_user }}/ton-keys/liteserver.pub\" \\\n -a 127.0.0.1:3031 -rc \"getaccount $DEPOOL_ADDR\" \\\n -rc \"quit\" | sed -n -e 's/^.*balance is //p' | head -c-3` / 1000000000))\" >> {{ node_exporter_custom_metrics_folder }}/balance.prom\n\necho \"ton_node_proxy0_balance $((`\"{{ install_path }}/{{ ton_src }}/ton/build/lite-client/lite-client\" \\\n -p \"/home/{{ ton_user }}/ton-keys/liteserver.pub\" \\\n -a 127.0.0.1:3031 -rc \"getaccount $PROXY0_ADDR\" \\\n -rc \"quit\" | sed -n -e 's/^.*balance is //p' | head -c-3` / 1000000000))\" >> {{ node_exporter_custom_metrics_folder }}/balance.prom\n\necho \"ton_node_proxy1_balance $((`\"{{ install_path }}/{{ ton_src }}/ton/build/lite-client/lite-client\" \\\n -p \"/home/{{ ton_user }}/ton-keys/liteserver.pub\" \\\n -a 127.0.0.1:3031 -rc \"getaccount $PROXY1_ADDR\" \\\n -rc \"quit\" | sed -n -e 's/^.*balance is //p' | head -c-3` / 1000000000))\" >> {{ node_exporter_custom_metrics_folder }}/balance.prom\n\necho \"ton_node_helper_balance $((`\"{{ install_path }}/{{ ton_src }}/ton/build/lite-client/lite-client\" \\\n -p \"/home/{{ ton_user }}/ton-keys/liteserver.pub\" \\\n -a 127.0.0.1:3031 -rc \"getaccount $HELPER_ADDR\" \\\n -rc \"quit\" | sed -n -e 's/^.*balance is //p' | head -c-3` / 1000000000))\" >> {{ node_exporter_custom_metrics_folder }}/balance.prom" }, { "alpha_fraction": 0.5214060544967651, "alphanum_fraction": 0.5488958954811096, "avg_line_length": 
32.12686538696289, "blob_id": "5f6b6614bdfb5aba0fe0440c0717a9295f63088a", "content_id": "ecce63126f911872af6f9058dbe82f97e33caaab", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4438, "license_type": "permissive", "max_line_length": 155, "num_lines": 134, "path": "/roles/prometheus-node-exporter/templates/node_watcher.py.j2", "repo_name": "br3d/ansible-freeton", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nimport config\nimport requests\nimport socket\nimport subprocess\nimport json\n\n\nbot_key = \"{{ watcher_bot_key }}\"\ng_id = \"{{ watcher_g_id }}\"\nl_dir = \"{{ node_exporter_custom_metrics_folder }}\"\nl_file = \"node_metrics.prom\"\ntc_dir = \"{{ install_path }}/{{ ton_src }}/ton/build/utils/\"\ntk_dir = \"/home/{{ ton_user }}/ton-keys/\"\ns_dir = \"{{ install_path }}/{{ ton_src }}/scripts/\"\nok_file = \"{{ node_exporter_custom_metrics_folder }}/old_addr\"\nnode_file = \"{{ node_exporter_custom_metrics_folder }}/node_id\"\n\n\n# Wallet balance\ndef check_balance():\n try:\n addr = open(tk_dir + socket.gethostname() + \".addr\")\n wlt = addr.read().strip()\n bCmnd = tc_dir + \"tonos-cli account \" + \\\n wlt + \" | grep -i 'balance' | awk '{print $2}' \"\n b = str(subprocess.check_output(bCmnd, shell=True,\n executable='/bin/bash', encoding='utf-8'))\n b = int(int(b) / 1000000000)\n return b\n except:\n tg_notification(\"Can't get wallet balance\")\n\n\n# Node diff\ndef check_diff():\n try:\n dCmnd = s_dir + \\\n \"check_node_sync_status.sh | grep TIME_DIFF | awk '{print $4}'\"\n d = int(subprocess.check_output(dCmnd, shell=True,\n executable='/bin/bash', encoding='utf-8'))\n return d\n except:\n tg_notification(\"Can't get node time diff\")\n\n# Check validator\ndef validator():\n try:\n newK = open(tk_dir + \"elections/\" + socket.gethostname() + \"-election-adnl-key\")\n nk = newK.read().lower()\n newN = open(tk_dir + \"elections/\" + socket.gethostname() + \"-election-key\")\n nn = newN.read().lower()\n oldK = open(ok_file)\n ok = oldK.read().lower().strip()\n for line in nk.split(\"\\n\"):\n if \"created new key\" in line:\n k = line[16:].strip()\n for line in nn.split(\"\\n\"):\n if \"created new key\" in line:\n n = line[16:].strip()\n vCmnd = tc_dir + \"tonos-cli getconfig 34\"\n v = str(subprocess.check_output(vCmnd, shell = True, executable='/bin/bash', encoding='utf-8'))\n j = json.loads(v[42:])\n val = 0\n for list in j[\"list\"]:\n if list['adnl_addr'].strip() == ok:\n val = list\n elif list['adnl_addr'].strip() == k:\n var = list\n with open(ok_file, \"w\") as text_f:\n text_f.write(format(k))\n with open(node_file, \"w\") as node_f:\n node_f.write(format(n))\n return val, j[\"total\"]\n except:\n tg_notification(\"Can't get validator information\")\n\n# Election nubmer\ndef e_num():\n try:\n eCmnd = tc_dir + \\\n \"tonos-cli runget -1:3333333333333333333333333333333333333333333333333333333333333333 active_election_id | grep -i 'Result' | awk '{print $2}'\"\n e = str(subprocess.check_output(eCmnd, shell = True, executable='/bin/bash', encoding='utf-8'))\n n = e[2:-3]\n if str(n) == \"0x0\":\n n = 0\n return int(n)\n return int(n, 16)\n except:\n tg_notification(\"Can't get election number\")\n\n# Telegram notification\ndef tg_notification(msg):\n botToken = bot_key\n botChatID = g_id\n message = '<b>ALERT!</b> %s!' 
% msg\n    sendText = 'https://api.telegram.org/bot' + botToken + \\\n        '/sendMessage?chat_id=' + botChatID + '&parse_mode=html&text=' + message\n    response = requests.get(sendText)\n    return response.json()\n\n\nif __name__ == '__main__':\n    balance = check_balance()\n    if balance < 10001:\n        tg_notification(\"Node balance is \" + str(balance))\n\n    diff = check_diff()\n    if diff < -50:\n        tg_notification(\"Diff is \" + str(diff))\n    \n    election_num = e_num()\n\n    if election_num == 0:\n        elections = 0\n    else:\n        elections = 1\n\n    validator = validator()\n\n    if validator[0] != 0:\n        v = 1\n        weight = validator[0][\"weight\"]\n    else:\n        v = 0\n        weight = 0\n    \n    with open(l_dir + l_file, \"w\") as text_file:\n        text_file.write(\"node_diff {0}\\n\".format(diff))\n        text_file.write(\"node_balance {0}\\n\".format(balance))\n        text_file.write(\"total_validators {0}\\n\".format(validator[1]))\n        text_file.write(\"elections {0}\\n\".format(elections))\n        text_file.write(\"validator {0}\\n\".format(v))\n        text_file.write(\"validator_weight {0}\\n\".format(weight))" } ]
4
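The templates in this role publish custom metrics by writing `name value` lines into `.prom` files that node_exporter's textfile collector scrapes, as node_diff.sh.j2 and node_watcher.py.j2 above show. Below is a hedged Python sketch of the same idea; the collector directory is an assumption standing in for the role's `node_exporter_custom_metrics_folder` variable.

```python
# Sketch of publishing one custom metric for node_exporter's textfile collector,
# as node_diff.sh.j2 and node_watcher.py.j2 above do. The directory below is an
# assumption standing in for node_exporter_custom_metrics_folder.
import os
import tempfile

METRICS_DIR = '/var/lib/node_exporter/textfile_collector'  # assumed collector path

def write_metric(name, value, filename):
    # Write to a temp file and rename it into place, so a scrape that happens
    # mid-write never sees a half-written metrics file.
    fd, tmp_path = tempfile.mkstemp(dir=METRICS_DIR)
    with os.fdopen(fd, 'w') as tmp:
        tmp.write('%s %s\n' % (name, value))
    os.replace(tmp_path, os.path.join(METRICS_DIR, filename))

if __name__ == '__main__':
    write_metric('ton_node_diff', -3, 'diff.prom')
```

The atomic rename is the only deliberate difference from the shell templates above, which redirect `echo` output straight into the `.prom` file.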
willbengtson/python-sdk
https://github.com/willbengtson/python-sdk
9aef41be2baf71df5bd7c8894e3f1737521992e7
7c787407f4418c68af72ef12ee2565675f5f593a
8b448b3253aebd19cfe9cd73c5c1289b4663d038
refs/heads/master
2022-02-26T17:14:32.260094
2019-10-10T18:22:45
2019-10-10T18:22:45
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.680701732635498, "alphanum_fraction": 0.6923976540565491, "avg_line_length": 25.65625, "blob_id": "b7199416c8b086d94bb15eebe5be318a02096b93", "content_id": "b3b244b83f6a1dc810deb78bd4138491d4251e21", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 855, "license_type": "permissive", "max_line_length": 244, "num_lines": 32, "path": "/docs/installation.rst", "repo_name": "willbengtson/python-sdk", "src_encoding": "UTF-8", "text": ".. _installation:\n\nInstallation\n============\n\nSupported Python Versions\n-------------------------\n\nThis library is supported for Python 2 and 3, for versions 2.7+ and 3.4+ respectively. It is recommended that Python 2 users use python 2.7.9+ to take advantage of the SSL Certificate Validation feature that is not included in earlier versions.\n\nInstallation\n------------\n\nYou can install the Apility.io Python Client Library with **pip**:\n\n.. code-block:: python\n\n $ sudo pip install apilityio-lib\n\n\nAPI Documentation\n-----------------\nYou can read the Python documentation :ref:`apireference`.\n\nQuickstart\n----------\nHow to start with the Python API quickly :ref:`quickstart`.\n\n\nExamples\n--------\nIf you would like to obtain code examples for any of the included client libraries, you can find it in the **examples** folder of the Github repository.\n\n\n" }, { "alpha_fraction": 0.6203228235244751, "alphanum_fraction": 0.6224467158317566, "avg_line_length": 43.74876022338867, "blob_id": "1486c6297d15d3e93ddb9b447bef483ced49f21b", "content_id": "28d160006ea28d9cb96fcb30090e2f0699bc1832", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 54146, "license_type": "permissive", "max_line_length": 399, "num_lines": 1210, "path": "/apilityio/client.py", "repo_name": "willbengtson/python-sdk", "src_encoding": "UTF-8", "text": "# Copyright 2017-2018 CAPITAL LAB OU\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nclient module\n-------------\n\nThis module contains the class Client that implements all the logic of the client to connect to the API services of\nApility.io.\n\nAll the methods return an Object that encapsulates the HTTP response status code, the error (if any),\nand the collection of objects needed.\n\n\"\"\"\n\nimport ipaddress\nimport requests\nimport logging\nimport validators\nimport time\n\nfrom uuid import UUID\n\nimport apilityio.model as model\nimport apilityio.common as common\nimport apilityio.errors as errors\n\n_logger = logging.getLogger(__name__)\n\n\nclass Client(object):\n \"\"\"Create the web service client to access the API. This class implements all the logic of the client to connect to the API services of Apility.io.\n\n Keyword Arguments:\n - ``api_key``: A string containing your Apility.io API Key.\n - ``protocol``: A string containing the protocol to connect to the API. Protocols allowed HTTP and HTTPS. 
Default protocol is HTTPS.\n    - ``host``: A string containing the FQDN of the host running the API.\n\n    Raises:\n      :func:`~apilityio.errors.ApilityioValueError`: If the provided arguments cannot connect to the API Service.\n    \"\"\"\n\n    def __init__(self, api_key=None, protocol=common.HTTPS_PROTOCOL, host=common.DEFAULT_HOST):\n        self._api_key = api_key\n        if api_key is not None and not self._ValidateUUID(api_key):\n            raise errors.ApilityioValueError(\n                'Not a valid API KEY. Is this a UUID?')\n\n        if protocol not in [common.HTTPS_PROTOCOL, common.HTTP_PROTOCOL]:\n            raise errors.ApilityioValueError('Not a valid Protocol.')\n\n        self._protocol = protocol\n        self._host = host\n\n    def _ValidateUUID(self, uuid_string):\n        \"\"\"\n        Validate that a UUID string is in\n        fact a valid uuid.\n\n        Happily, the uuid module does the actual\n        checking for us.\n\n        It is vital that the 'version' kwarg be passed\n        to the UUID() call, otherwise any 32-character\n        hex string is considered valid.\n        \"\"\"\n\n        try:\n            UUID(uuid_string, version=4)\n        except ValueError:\n            # If it's a value error, then the string\n            # is not a valid hex code for a UUID.\n            return False\n\n        return True\n\n    def _GetURL(self):\n        return '%s://%s' % (self._protocol, self._host)\n\n    def _ValidateIP(self, ip_address):\n        \"\"\"Validate that this is a well-formed IP address\n        \"\"\"\n        try:\n            ipaddress.ip_address(ip_address)\n        except Exception:\n            raise errors.ApilityioValueError('Not a valid IP address.')\n\n    def _ValidateIPList(self, ip_addresses):\n        \"\"\"Validate that all the elements are well-formed IP addresses\n        \"\"\"\n        if ip_addresses is None or len(ip_addresses) == 0:\n            raise errors.ApilityioValueError('Empty list.')\n        try:\n            for ip_address in ip_addresses:\n                ipaddress.ip_address(ip_address)\n        except Exception:\n            raise errors.ApilityioValueError('Not a valid IP address')\n\n    def _ValidateDomain(self, domain):\n        \"\"\"Validate that this is a well-formed domain\n        \"\"\"\n        try:\n            return validators.domain(domain)\n        except Exception:\n            raise errors.ApilityioValueError('Not a valid Domain.')\n\n    def _ValidateDomainList(self, domains):\n        \"\"\"Validate that all the elements are well-formed domains\n        \"\"\"\n        if domains is None or len(domains) == 0:\n            raise errors.ApilityioValueError('Empty list.')\n        try:\n            for domain in domains:\n                validators.domain(domain)\n        except Exception:\n            raise errors.ApilityioValueError('Not a valid Domain.')\n\n    def _ValidateEmail(self, email):\n        \"\"\"Validate that this is a well-formed email\n        \"\"\"\n        try:\n            return validators.email(email)\n        except Exception:\n            raise errors.ApilityioValueError('Not a valid Email.')\n\n    def _ValidateEmailList(self, emails):\n        \"\"\"Validate that all the elements are well-formed emails\n        \"\"\"\n        if emails is None or len(emails) == 0:\n            raise errors.ApilityioValueError('Empty list.')\n        try:\n            for email in emails:\n                validators.email(email)\n        except Exception:\n            raise errors.ApilityioValueError('Not a valid Email.')\n\n    def _ValidateASNum(self, asnum):\n        \"\"\"Validate that this is a well-formed AS number\n        \"\"\"\n        try:\n            asnumber = int(asnum)\n            if asnumber <= 0:\n                raise errors.ApilityioValueError(\n                    'Not a valid ASNUM. Negative number.')\n            return True\n        except Exception:\n            raise errors.ApilityioValueError(\n                'Not a valid ASNUM. 
It is a string.')\n\n    def _ValidateASNumList(self, as_numbers):\n        \"\"\"Validate that all the elements are well-formed AS numbers\n        \"\"\"\n        if as_numbers is None or len(as_numbers) == 0:\n            raise errors.ApilityioValueError('Empty list.')\n        try:\n            for as_number in as_numbers:\n                asnum = int(as_number)\n                if asnum <= 0:\n                    raise errors.ApilityioValueError(\n                        'Not a valid ASNUM. Negative number.')\n        except Exception:\n            raise errors.ApilityioValueError(\n                'Not a valid ASNUM. It is a string.')\n\n    def _ValidateTimestampSeconds(self, timestamp):\n        \"\"\"Validate that this is a well-formed timestamp\n        \"\"\"\n        try:\n            timestamp = int(timestamp)\n            if timestamp <= 0:\n                raise errors.ApilityioValueError(\n                    'Not a valid Timestamp. Negative number.')\n            return True\n        except Exception:\n            raise errors.ApilityioValueError(\n                'Not a valid Timestamp. It is a string.')\n\n    def _ValidatePage(self, page):\n        \"\"\"Validate that page is in the correct range\n        \"\"\"\n        try:\n            page = int(page)\n            if page < 1:\n                raise errors.ApilityioValueError(\n                    'Not a valid Page number. Must be bigger than 0.')\n            return True\n        except Exception:\n            raise errors.ApilityioValueError(\n                'Not a valid Page number. It is a string.')\n\n    def _ValidateItems(self, items):\n        \"\"\"Validate that items is in the correct range\n        \"\"\"\n        try:\n            items = int(items)\n            if items < 5:\n                raise errors.ApilityioValueError(\n                    'Not a valid Items number. Must be bigger than 4.')\n            return True\n        except Exception:\n            raise errors.ApilityioValueError(\n                'Not a valid Items number. It is a string.')\n\n    def _ValidateTTL(self, ttl):\n        \"\"\"Validate that the TTL is in the correct range\n        \"\"\"\n        try:\n            ttl = int(ttl)\n            if ttl < 0:\n                raise errors.ApilityioValueError(\n                    'Not a valid TTL number. Must be bigger than -1.')\n            return True\n        except Exception:\n            raise errors.ApilityioValueError(\n                'Not a valid TTL number. It is a string.')\n\n    def _ValidateCountry(self, country):\n        \"\"\"Validate that the country is a valid ISO 3166-1 code\n        \"\"\"\n        try:\n            if len(country) != 2:\n                raise errors.ApilityioValueError(\n                    'Must be a two chars ISO 3166-1 code.')\n            if country.upper() not in common.COUNTRY_LIST:\n                raise errors.ApilityioValueError(\n                    'Cannot find the country. Check the two chars code.')\n            return True\n        except Exception:\n            raise errors.ApilityioValueError('Not a valid Country.')\n\n    def _ValidateContinent(self, continent):\n        \"\"\"Validate that the continent is a valid two-char code\n        \"\"\"\n        try:\n            if len(continent) != 2:\n                raise errors.ApilityioValueError(\n                    'Must be a two chars continent code: EU, AS, NA, AF, AN, SA, OC')\n            if continent.upper() not in common.CONTINENT_LIST:\n                raise errors.ApilityioValueError(\n                    'Cannot find the continent. Check the two chars code.')\n            return True\n        except Exception:\n            raise errors.ApilityioValueError('Not a valid Continent.')\n\n    def GetConnectionData(self):\n        \"\"\"Return connection data used, the API KEY (if any), the protocol (http or https) and the hostname (api.apility.net by default).\n\n        Returns:\n        - ``api_key``: a string representing the Apility.io API KEY\n        - ``protocol``: a string representing the connection protocol http or https.\n        - ``host``: a string representing the FQDN where the Apility.io API is listening.\n        \"\"\"\n        return self._api_key, self._protocol, self._host\n\n    def CheckIP(self, ip_address):\n        \"\"\"Check if the IP address belongs to any list of the blacklist databases of Apility.io. 
It also returns the blacklists where the IP address was found.\n\n Arguments:\n - ``ip_address``: A string containing the IP address to check.\n\n Returns:\n - :func:`~apilityio.model.BadIPResponse`: an object containing the HTTP status code response, the error (if any) and the list of blacklists where the IP address was found. A 404 HTTP response means that the IP address was not found in any blacklists. A 200 HTTP response means that the IP address was found in one or more blacklists and the developer can check the lists in the blacklists.\n\n Raises:\n - :func:`~apilityio.errors.ApilityioValueError`: If the provided argument is not valid IP address.\n \"\"\"\n self._ValidateIP(ip_address)\n\n endpoint = '%s/%s/%s' % (self._GetURL(), 'badip', ip_address)\n\n response = requests.request(\"GET\", endpoint, headers={\n 'X-Auth-Token': self._api_key, 'Accept': 'application/json'})\n\n _logger.debug('BadIp Endpoint: %s. Response: %s:%s' %\n (endpoint, response.status_code, response.text))\n\n if response.status_code == requests.codes.bad_request:\n dto = model.BadIPResponse(\n status_code=response.status_code, error='Bad Request.')\n return dto\n\n if response.status_code == requests.codes.not_found:\n dto = model.BadIPResponse(status_code=response.status_code)\n return dto\n\n if response.status_code == requests.codes.ok:\n json_dump = response.json()\n blacklists = json_dump['response']\n dto = model.BadIPResponse(blacklists=blacklists, json=json_dump)\n return dto\n\n return model.BadIPResponse(status_code=response.status_code, error=response.text)\n\n def CheckBatchIP(self, ip_addresses):\n \"\"\"Check if a list of IP addresses belong to any list of the blacklist databases of Apility.io. It also returns the blacklists where the IP addresses were found.\n\n Arguments:\n - ``ip_addresses``: A list composed of strings containing the IP addresses to check.\n\n Returns:\n - :func:`~apilityio.model.BadBatchIPResponse`: an object containing the HTTP status code response, the error (if any) and the list of blacklists where the IP addresses were found. For each IP address there is a list containing the blacklists where the IP was found. If the IP was not found in any blackllist then the list is empty.\n\n Raises:\n - :func:`~apilityio.errors.ApilityioValueError`: If the provided argument is not valid list of IP addresses.\n \"\"\"\n\n self._ValidateIPList(ip_addresses)\n\n endpoint = '%s/%s/%s' % (self._GetURL(),\n 'badip_batch', ','.join(ip_addresses))\n\n response = requests.request(\"GET\", endpoint, headers={\n 'X-Auth-Token': self._api_key, 'Accept': 'application/json'})\n\n _logger.debug('BadIp Endpoint: %s. 
Response: %s:%s' %\n (endpoint, response.status_code, response.text))\n\n if response.status_code == requests.codes.bad_request:\n dto = model.BadBatchIPResponse(\n status_code=response.status_code, error='Bad Request.')\n return dto\n\n if response.status_code == requests.codes.ok:\n json_dump = response.json()\n ipblacklists = json_dump['response']\n ipblacklists_set = set()\n for ipblacklist_pair in ipblacklists:\n ipblacklists_set.add(model.IPBlacklist(\n ipblacklist_pair['ip'], ipblacklist_pair['blacklists']))\n dto = model.BadBatchIPResponse(\n ipblacklists_set=ipblacklists_set, json=json_dump)\n return dto\n\n return model.BadBatchIPResponse(status_code=response.status_code, error=response.text)\n\n def GetGeoIP(self, ip_address):\n \"\"\"Get the IP address geo-location information.\n\n Arguments:\n - ``ip_address``: A string containing the IP address to geo-locate.\n\n Returns:\n - :func:`~apilityio.model.GeoIPResponse`: an object containing the HTTP status code response, the error (if any) and the object containing the geo location properties of the IP address.\n\n Raises:\n - :func:`~apilityio.errors.ApilityioValueError`: If the provided argument is not valid IP address.\n \"\"\"\n self._ValidateIP(ip_address)\n\n endpoint = '%s/%s/%s' % (self._GetURL(), 'geoip', ip_address)\n\n response = requests.request(\"GET\", endpoint, headers={\n 'X-Auth-Token': self._api_key, 'Accept': 'application/json'})\n\n _logger.debug('GeoIp Endpoint: %s. Response: %s:%s' %\n (endpoint, response.status_code, response.text))\n\n if response.status_code == requests.codes.bad_request:\n dto = model.GeoIPResponse(\n status_code=response.status_code, error='Bad Request.')\n return dto\n\n if response.status_code == requests.codes.ok:\n json_dump = response.json()\n geoip = json_dump['ip']\n dto = model.GeoIPResponse(geoip=geoip, json=json_dump)\n return dto\n\n return model.GeoIPResponse(status_code=response.status_code, error=response.text)\n\n def GetGeoBatchIP(self, ip_addresses):\n \"\"\"Get the gelocation information of a list of ip addresses passed as argument.\n\n Arguments:\n - ``ip_addresses``: A list of strings containing the IP addresses to geo-locate.\n\n Returns:\n - :func:`~apilityio.model.GeoBatchIPResponse`: an object containing the HTTP status code response, the error (if any) and a list of objects containing the geo location properties of the IP addresses.\n\n Raises:\n - :func:`~apilityio.errors.ApilityioValueError`: If the provided argument is not a list of valid IP addresses.\n \"\"\"\n\n self._ValidateIPList(ip_addresses)\n\n endpoint = '%s/%s/%s' % (self._GetURL(),\n 'geoip_batch', ','.join(ip_addresses))\n\n response = requests.request(\"GET\", endpoint, headers={\n 'X-Auth-Token': self._api_key, 'Accept': 'application/json'})\n\n _logger.debug('GeoIP Endpoint: %s. 
Response: %s:%s' %\n (endpoint, response.status_code, response.text))\n\n if response.status_code == requests.codes.bad_request:\n dto = model.GeoBatchIPResponse(\n status_code=response.status_code, error='Bad Request.')\n return dto\n\n if response.status_code == requests.codes.ok:\n json_dump = response.json()\n geolocated_ip_addresses = json_dump['response']\n geolocated_ip_list = []\n for geolocated_ip in geolocated_ip_addresses:\n geolocated_ip_list.append(model.IPGeodata(\n geolocated_ip['ip'], model.GeoIP(geolocated_ip['geoip'])))\n dto = model.GeoBatchIPResponse(\n geolocated_ip_list=geolocated_ip_list, json=json_dump)\n return dto\n\n return model.GeoBatchIPResponse(status_code=response.status_code, error=response.text)\n\n def CheckDomain(self, domain):\n \"\"\"Check the Domain and its MX and NS records belong to any list of the blacklist databases of Apility.io. It returns the scoring and blacklists where the Domain info was found.\n\n Arguments:\n - ``domain``: A string containing the domain to check.\n\n Returns:\n - :func:`~apilityio.model.BadDomainResponse`: an object containing the HTTP status code response, the error (if any) and the scoring and lists of blacklists where the Domain, MX and NS records were found. A 200 HTTP response means that the Domain, MX or NS records were found in one or more blacklists and the developer can check the scoring and the blacklists.\n\n Raises:\n - :func:`~apilityio.errors.ApilityioValueError`: If the provided argument is not valid FQDN.\n \"\"\"\n\n self._ValidateDomain(domain)\n\n endpoint = '%s/%s/%s' % (self._GetURL(), 'baddomain', domain)\n\n response = requests.request(\"GET\", endpoint, headers={\n 'X-Auth-Token': self._api_key, 'Accept': 'application/json'})\n\n _logger.debug('Baddomain Endpoint: %s. Response: %s:%s' %\n (endpoint, response.status_code, response.text))\n\n if response.status_code == requests.codes.bad_request:\n dto = model.BadDomainResponse(\n status_code=response.status_code, error='Bad Request.')\n return dto\n\n if response.status_code == requests.codes.ok:\n json_dump = response.json()\n baddomain_response = json_dump['response']\n dto = model.BadDomainResponse(\n domain_data=baddomain_response, json=json_dump)\n return dto\n\n return model.BadDomainResponse(status_code=response.status_code, error=response.text)\n\n def CheckBatchDomain(self, domains):\n \"\"\"Check if a list of Domain and its MX and NS records belong to any list of the blacklist databases of Apility.io. It returns a list of the scoring and blacklists where the Domains info were found.\n\n Arguments:\n - ``domains``: A list composed of strings containing the domains to check.\n\n Returns:\n - :func:`~apilityio.model.BadBatchDomainResponse`: an object containing the HTTP status code response, the error (if any) and the list of blacklists where the IP addresses were found. Also the list of domains pairing the scoring and lists of blacklists where the Domain, MX and NS records were found.\n\n Raises:\n - :func:`~apilityio.errors.ApilityioValueError`: If the provided argument is not valid list of domains.\n \"\"\"\n\n self._ValidateDomainList(domains)\n\n endpoint = '%s/%s/%s' % (self._GetURL(),\n 'baddomain_batch', ','.join(domains))\n\n response = requests.request(\"GET\", endpoint, headers={\n 'X-Auth-Token': self._api_key, 'Accept': 'application/json'})\n\n _logger.debug('BadDomain Endpoint: %s. 
Response: %s:%s' %\n (endpoint, response.status_code, response.text))\n\n if response.status_code == requests.codes.bad_request:\n dto = model.BadBatchDomainResponse(\n status_code=response.status_code, error='Bad Request.')\n return dto\n\n if response.status_code == requests.codes.ok:\n json_dump = response.json()\n domains = json_dump['response']\n domain_list = []\n for domain in domains:\n domain_list.append(model.DomainScored(\n domain['domain'], model.BadDomain(domain['scoring'])))\n dto = model.BadBatchDomainResponse(\n domain_scoring_list=domain_list, json=json_dump)\n return dto\n\n return model.BadBatchDomainResponse(status_code=response.status_code, error=response.text)\n\n def CheckEmail(self, email):\n \"\"\"Check the Email including all tests performed to the Domain plus a full SMTP test on the remote server. It returns the global scoring of the Email and each scoring per test performed.\n\n Arguments:\n - ``email``: A string containing the email to check.\n\n Returns:\n - :func:`~apilityio.model.BadEmailResponse`: an object containing the HTTP status code response, the error (if any) and the scoring and lists of blacklists where the Emal, SMTP server, MX and NS records were found. A 200 HTTP response means that the Domain, SMTP Server, MX or NS records were found in one or more blacklists and the developer can check the scoring and the blacklists.\n\n Raises:\n - :func:`~apilityio.errors.ApilityioValueError`: If the provided argument is not valid Email.\n \"\"\"\n self._ValidateEmail(email)\n\n endpoint = '%s/%s/%s' % (self._GetURL(), 'bademail', email)\n\n response = requests.request(\"GET\", endpoint, headers={\n 'X-Auth-Token': self._api_key, 'Accept': 'application/json'})\n\n _logger.debug('Bademail Endpoint: %s. Response: %s:%s' %\n (endpoint, response.status_code, response.text))\n\n if response.status_code == requests.codes.bad_request:\n dto = model.BadEmailResponse(\n status_code=response.status_code, error='Bad Request.')\n return dto\n\n if response.status_code == requests.codes.ok:\n json_dump = response.json()\n bademail_response = json_dump['response']\n dto = model.BadEmailResponse(\n email_data=bademail_response, json=json_dump)\n return dto\n\n return model.BadEmailResponse(status_code=response.status_code, error=response.text)\n\n def CheckBatchEmail(self, emails):\n \"\"\"Check if a list of Emails including all tests performed to the Domain plus a full SMTP test on the remote server. It returns the global scoring of each Email and each scoring per test performed.\n\n Arguments:\n - ``emails``: A list composed of strings containing the emails to check.\n\n Returns:\n - :func:`~apilityio.model.BadBatchEmailResponse`: an object containing the HTTP status code response, the error (if any). Also the list of emails pairing the scoring and lists of blacklists where the Emails, tests and domains and MX and NS records were found.\n\n Raises:\n - :func:`~apilityio.errors.ApilityioValueError`: If the provided argument is not valid list of emails.\n \"\"\"\n\n self._ValidateEmailList(emails)\n\n endpoint = '%s/%s/%s' % (self._GetURL(),\n 'bademail_batch', ','.join(emails))\n\n response = requests.request(\"GET\", endpoint, headers={\n 'X-Auth-Token': self._api_key, 'Accept': 'application/json'})\n\n _logger.debug('BadEmails Endpoint: %s. 
Response: %s:%s' %\n (endpoint, response.status_code, response.text))\n\n if response.status_code == requests.codes.bad_request:\n dto = model.BadBatchEmailResponse(\n status_code=response.status_code, error='Bad Request.')\n return dto\n\n if response.status_code == requests.codes.ok:\n json_dump = response.json()\n emails = json_dump['response']\n email_list = []\n for email in emails:\n email_list.append(model.EmailScored(\n email['email'], model.BadEmail(email['scoring'])))\n dto = model.BadBatchEmailResponse(\n email_scoring_list=email_list, json=json_dump)\n return dto\n\n return model.BadBatchEmailResponse(status_code=response.status_code, error=response.text)\n\n def GetASbyIP(self, ip_address):\n \"\"\"Get the Autonomous System information of a given IP address.\n\n Arguments:\n - ``ip_address``: A string containing the IP address to obtain information of its Autonomous System.\n\n Returns:\n - :func:`~apilityio.model.ASResponse`: an object containing the HTTP status code response, the error (if any) and the object containing the Autonomous System properties of the IP address.\n\n Raises:\n - :func:`~apilityio.errors.ApilityioValueError`: If the provided argument is not valid IP address.\n \"\"\"\n self._ValidateIP(ip_address)\n\n endpoint = '%s/%s/%s' % (self._GetURL(), 'as/ip', ip_address)\n\n response = requests.request(\"GET\", endpoint, headers={\n 'X-Auth-Token': self._api_key, 'Accept': 'application/json'})\n\n _logger.debug('AsIP Endpoint: %s. Response: %s:%s' %\n (endpoint, response.status_code, response.text))\n\n if response.status_code == requests.codes.bad_request:\n dto = model.ASResponse(\n status_code=response.status_code, error='Bad Request.')\n return dto\n\n if response.status_code == requests.codes.ok:\n json_dump = response.json()\n asystem = json_dump['as']\n dto = model.ASResponse(asystem=asystem, json=json_dump)\n return dto\n\n return model.ASResponse(status_code=response.status_code, error=response.text)\n\n def GetASbyNum(self, asnum):\n \"\"\"Get the Autonomous System information by its number (ASN).\n\n Arguments:\n - ``asnum``: An integer containing the ASN to obtain information of.\n\n Returns:\n - :func:`~apilityio.model.ASResponse`: an object containing the HTTP status code response, the error (if any) and the object containing the Autonomous System properties.\n\n Raises:\n - :func:`~apilityio.errors.ApilityioValueError`: If the provided argument is not AS number.\n \"\"\"\n self._ValidateASNum(asnum)\n\n endpoint = '%s/%s/%s' % (self._GetURL(), 'as/num', int(asnum))\n\n response = requests.request(\"GET\", endpoint, headers={\n 'X-Auth-Token': self._api_key, 'Accept': 'application/json'})\n\n _logger.debug('AsNum Endpoint: %s. 
Response: %s:%s' %\n (endpoint, response.status_code, response.text))\n\n if response.status_code == requests.codes.bad_request:\n dto = model.ASResponse(\n status_code=response.status_code, error='Bad Request.')\n return dto\n\n if response.status_code == requests.codes.ok:\n json_dump = response.json()\n asystem = json_dump['as']\n dto = model.ASResponse(asystem=asystem, json=json_dump)\n return dto\n\n return model.ASResponse(status_code=response.status_code, error=response.text)\n\n def GetASBatchByIP(self, ip_addresses):\n \"\"\"Get the Autonomous System information of a list of ip addresses passed as argument.\n\n Arguments:\n - ``ip_addresses``: A list of strings containing the IP addresses to get AS data.\n\n Returns:\n - :func:`~apilityio.model.ASBatchIPResponse`: an object containing the HTTP status code response, the error (if any) and a list of objects containing the Autonomous System properties of the IP addresses.\n\n Raises:\n - :func:`~apilityio.errors.ApilityioValueError`: If the provided argument is not a list of valid IP addresses.\n \"\"\"\n\n self._ValidateIPList(ip_addresses)\n\n endpoint = '%s/%s/%s' % (self._GetURL(),\n 'as_batch/ip', ','.join(ip_addresses))\n\n response = requests.request(\"GET\", endpoint, headers={\n 'X-Auth-Token': self._api_key, 'Accept': 'application/json'})\n\n _logger.debug('ASbyIP Endpoint: %s. Response: %s:%s' %\n (endpoint, response.status_code, response.text))\n\n if response.status_code == requests.codes.bad_request:\n dto = model.ASBatchIPResponse(\n status_code=response.status_code, error='Bad Request.')\n return dto\n\n if response.status_code == requests.codes.ok:\n json_dump = response.json()\n asystem_ip_addresses = json_dump['response']\n asystem_ip_list = []\n for asystem_ip in asystem_ip_addresses:\n asystem_ip_list.append(model.IPASystem(\n asystem_ip['ip'], model.ASystem(asystem_ip['as'])))\n dto = model.ASBatchIPResponse(\n asystem_ip_list=asystem_ip_list, json=json_dump)\n return dto\n\n return model.ASBatchIPResponse(status_code=response.status_code, error=response.text)\n\n def GetASBatchByNum(self, as_numbers):\n \"\"\"Get the Autonomous System information of a list of AS numbers passed as argument.\n\n Arguments:\n - ``as_numbers``: A list of integers containing the AS numbers to get AS data.\n\n Returns:\n - :func:`~apilityio.model.ASBatchIPResponse`: an object containing the HTTP status code response, the error (if any) and a list of objects containing the Autonomous System properties of the AS numbers.\n\n Raises:\n - :func:`~apilityio.errors.ApilityioValueError`: If the provided argument is not a list of valid AS numbers.\n \"\"\"\n\n self._ValidateASNumList(as_numbers)\n\n endpoint = '%s/%s/%s' % (self._GetURL(), 'as_batch/num',\n ','.join([str(x) for x in as_numbers]))\n\n response = requests.request(\"GET\", endpoint, headers={\n 'X-Auth-Token': self._api_key, 'Accept': 'application/json'})\n\n _logger.debug('ASbyNum Endpoint: %s. 
Response: %s:%s' %\n (endpoint, response.status_code, response.text))\n\n if response.status_code == requests.codes.bad_request:\n dto = model.ASBatchNumResponse(\n status_code=response.status_code, error='Bad Request.')\n return dto\n\n if response.status_code == requests.codes.ok:\n json_dump = response.json()\n asystem_numbers = json_dump['response']\n asystem_num_list = []\n for asystem_num in asystem_numbers:\n asystem_num_list.append(model.ASNASystem(\n asystem_num['asn'], model.ASystem(asystem_num['as'])))\n dto = model.ASBatchNumResponse(\n asystem_num_list=asystem_num_list, json=json_dump)\n return dto\n\n return model.ASBatchNumResponse(status_code=response.status_code, error=response.text)\n\n def GetWhoisIP(self, ip_address):\n \"\"\"Get the WHOIS information of a given IP address.\n\n Arguments:\n - ``ip_address``: A string containing the IP address to obtain its WHOIS database information.\n\n Returns:\n - :func:`~apilityio.model.WhoisIPResponse`: an object containing the HTTP status code response, the error (if any) and the object containing the WHOIS properties of the IP address.\n\n Raises:\n - :func:`~apilityio.errors.ApilityioValueError`: If the provided argument is not a valid IP address.\n \"\"\"\n\n self._ValidateIP(ip_address)\n\n endpoint = '%s/%s/%s' % (self._GetURL(), 'whois/ip', ip_address)\n\n response = requests.request(\"GET\", endpoint, headers={\n 'X-Auth-Token': self._api_key, 'Accept': 'application/json'})\n\n _logger.debug('WHOISIP Endpoint: %s. Response: %s:%s' %\n (endpoint, response.status_code, response.text))\n\n if response.status_code == requests.codes.bad_request:\n dto = model.WhoisIPResponse(\n status_code=response.status_code, error='Bad Request.')\n return dto\n\n if response.status_code == requests.codes.ok:\n json_dump = response.json()\n whois = json_dump['whois']\n dto = model.WhoisIPResponse(whois=whois, json=json_dump)\n return dto\n\n return model.WhoisIPResponse(status_code=response.status_code, error=response.text)\n\n
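 # Editor's note: the history endpoints that follow paginate with timestamp/page/items\n # query parameters; a hypothetical call such as client.GetHistoryIP('1.2.3.4', items=10, page=2)\n # issues GET .../metadata/changes/ip/1.2.3.4?timestamp=<now>&page=2&items=10\n\n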
 def GetHistoryIP(self, ip_address, timestamp=None, items=5, page=1):\n \"\"\"Get the list of transactions of a given IP address in our database. For experts who wish to know the historical activity of the given IP address.\n\n Arguments:\n - ``ip_address``: A string containing the IP address to obtain the historical information.\n - ``page``: (Optional) An integer starting with 1 to paginate the results of the query.\n - ``items``: (Optional) An integer with the number of items to return per page. From five to two hundred as maximum.\n - ``timestamp``: (Optional) An integer as UNIX time in seconds to limit the search. The search will be filtered by values less than or equal to ``timestamp``.\n\n Returns:\n - :func:`~apilityio.model.HistoryIPResponse`: an object containing the HTTP status code response, the error (if any) and the object containing all historical information.\n\n Raises:\n - :func:`~apilityio.errors.ApilityioValueError`: If the provided arguments are not a valid IP address, page, items or timestamp.\n \"\"\"\n\n self._ValidateIP(ip_address)\n if timestamp:\n self._ValidateTimestampSeconds(timestamp)\n else:\n timestamp = int(time.time())\n self._ValidatePage(page)\n self._ValidateItems(items)\n\n endpoint = '%s/%s/%s?timestamp=%s&page=%s&items=%s' % (\n self._GetURL(), 'metadata/changes/ip', ip_address, timestamp, page, items)\n\n response = requests.request(\"GET\", endpoint, headers={\n 'X-Auth-Token': self._api_key, 'Accept': 'application/json'})\n\n _logger.debug('HISTORYIP Endpoint: %s. Response: %s:%s' %\n (endpoint, response.status_code, response.text))\n\n if response.status_code == requests.codes.unauthorized:\n dto = model.HistoryIPResponse(\n status_code=response.status_code, error='Unauthorized.')\n return dto\n\n if response.status_code == requests.codes.ok:\n json_dump = response.json()\n history = json_dump['changes_ip']\n dto = model.HistoryIPResponse(history=history, json=json_dump)\n return dto\n\n return model.HistoryIPResponse(status_code=response.status_code, error=response.text)\n\n def GetHistoryDomain(self, domain, timestamp=None, items=5, page=1):\n \"\"\"Get the list of transactions of a given Domain in our database. For experts who wish to know the historical activity of the given domain.\n\n Arguments:\n - ``domain``: A string containing the FQDN to obtain the historical information.\n - ``page``: (Optional) An integer starting with 1 to paginate the results of the query.\n - ``items``: (Optional) An integer with the number of items to return per page. From five to two hundred as maximum.\n - ``timestamp``: (Optional) An integer as UNIX time in seconds to limit the search. The search will be filtered by values less than or equal to ``timestamp``.\n\n Returns:\n - :func:`~apilityio.model.HistoryDomainResponse`: an object containing the HTTP status code response, the error (if any) and the object containing all historical information.\n\n Raises:\n - :func:`~apilityio.errors.ApilityioValueError`: If the provided arguments are not a valid FQDN, page, items or timestamp.\n \"\"\"\n\n self._ValidateDomain(domain)\n if timestamp:\n self._ValidateTimestampSeconds(timestamp)\n else:\n timestamp = int(time.time())\n self._ValidatePage(page)\n self._ValidateItems(items)\n\n endpoint = '%s/%s/%s?timestamp=%s&page=%s&items=%s' % (\n self._GetURL(), 'metadata/changes/domain', domain, timestamp, page, items)\n\n response = requests.request(\"GET\", endpoint, headers={\n 'X-Auth-Token': self._api_key, 'Accept': 'application/json'})\n\n _logger.debug('HISTORYDOMAIN Endpoint: %s. Response: %s:%s' %\n (endpoint, response.status_code, response.text))\n\n if response.status_code == requests.codes.unauthorized:\n dto = model.HistoryDomainResponse(\n status_code=response.status_code, error='Unauthorized.')\n return dto\n\n if response.status_code == requests.codes.ok:\n json_dump = response.json()\n history = json_dump['changes_domain']\n dto = model.HistoryDomainResponse(history=history, json=json_dump)\n return dto\n\n return model.HistoryDomainResponse(status_code=response.status_code, error=response.text)\n\n def GetHistoryEmail(self, email, timestamp=None, items=5, page=1):\n \"\"\"Get the list of transactions of a given Email in our database. For experts who wish to know the historical activity of the given Email.\n\n Arguments:\n - ``email``: A string containing the Email to obtain the historical information.\n - ``page``: (Optional) An integer starting with 1 to paginate the results of the query.\n - ``items``: (Optional) An integer with the number of items to return per page. From five to two hundred as maximum.\n - ``timestamp``: (Optional) An integer as UNIX time in seconds to limit the search. 
The search will be filtered by values less than or equal to ``timestamp``.\n\n Returns:\n - :func:`~apilityio.model.HistoryEmailResponse`: an object containing the HTTP status code response, the error (if any) and the object containing all historical information.\n\n Raises:\n - :func:`~apilityio.errors.ApilityioValueError`: If the provided arguments are not a valid Email, page, items or timestamp.\n \"\"\"\n\n self._ValidateEmail(email)\n if timestamp:\n self._ValidateTimestampSeconds(timestamp)\n else:\n timestamp = int(time.time())\n self._ValidatePage(page)\n self._ValidateItems(items)\n\n endpoint = '%s/%s/%s?timestamp=%s&page=%s&items=%s' % (\n self._GetURL(), 'metadata/changes/email', email, timestamp, page, items)\n\n response = requests.request(\"GET\", endpoint, headers={\n 'X-Auth-Token': self._api_key, 'Accept': 'application/json'})\n\n _logger.debug('HISTORYEMAIL Endpoint: %s. Response: %s:%s' %\n (endpoint, response.status_code, response.text))\n\n if response.status_code == requests.codes.unauthorized:\n dto = model.HistoryEmailResponse(\n status_code=response.status_code, error='Unauthorized.')\n return dto\n\n if response.status_code == requests.codes.ok:\n json_dump = response.json()\n history = json_dump['changes_email']\n dto = model.HistoryEmailResponse(history=history, json=json_dump)\n return dto\n\n return model.HistoryEmailResponse(status_code=response.status_code, error=response.text)\n\n def GetQuarantineIP(self):\n \"\"\"Get the list of IP addresses in the quarantine. Quarantine is a private exclusion list based on user IP address properties.\n\n Returns:\n - :func:`~apilityio.model.QuarantineIPResponse`: an object containing the HTTP status code response, the error (if any) and the object containing all the IP addresses in the quarantine.\n\n \"\"\"\n\n endpoint = '%s/%s' % (self._GetURL(), 'quarantine/ip')\n\n response = requests.request(\"GET\", endpoint, headers={\n 'X-Auth-Token': self._api_key, 'Accept': 'application/json'})\n\n _logger.debug('QUARANTINEIP GET Endpoint: %s. Response: %s:%s' %\n (endpoint, response.status_code, response.text))\n\n if response.status_code == requests.codes.unauthorized:\n dto = model.QuarantineIPResponse(\n status_code=response.status_code, error='Unauthorized.')\n return dto\n\n if response.status_code == requests.codes.ok:\n json_dump = response.json()\n quarantine = json_dump['quarantined']\n dto = model.QuarantineIPResponse(quarantine=quarantine, json=json_dump)\n return dto\n\n return model.QuarantineIPResponse(status_code=response.status_code, error=response.text)\n\n
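 # Editor's sketch (hypothetical session): reading the quarantine back, e.g.\n # >>> dto = client.GetQuarantineIP()\n # >>> dto.quarantine # the 'quarantined' entries from the JSON response\n\n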
 def GetQuarantineCountry(self):\n \"\"\"Get the list of countries in the quarantine. Quarantine is a private exclusion list based on user IP address properties. In this case, the country the IP belongs to.\n\n Returns:\n - :func:`~apilityio.model.QuarantineCountryResponse`: an object containing the HTTP status code response, the error (if any) and the object containing all the countries in the quarantine.\n\n \"\"\"\n\n endpoint = '%s/%s' % (self._GetURL(), 'quarantine/country')\n\n response = requests.request(\"GET\", endpoint, headers={\n 'X-Auth-Token': self._api_key, 'Accept': 'application/json'})\n\n _logger.debug('QUARANTINECOUNTRY GET Endpoint: %s. Response: %s:%s' % (\n endpoint, response.status_code, response.text))\n\n if response.status_code == requests.codes.unauthorized:\n dto = model.QuarantineCountryResponse(\n status_code=response.status_code, error='Unauthorized.')\n return dto\n\n if response.status_code == requests.codes.ok:\n json_dump = response.json()\n quarantine = json_dump['quarantined']\n dto = model.QuarantineCountryResponse(quarantine=quarantine, json=json_dump)\n return dto\n\n return model.QuarantineCountryResponse(status_code=response.status_code, error=response.text)\n\n def GetQuarantineContinent(self):\n \"\"\"Get the list of continents in the quarantine. Quarantine is a private exclusion list based on user IP address properties. In this case, the continent the IP belongs to.\n\n Returns:\n - :func:`~apilityio.model.QuarantineContinentResponse`: an object containing the HTTP status code response, the error (if any) and the object containing all the continents in the quarantine.\n\n \"\"\"\n\n endpoint = '%s/%s' % (self._GetURL(), 'quarantine/continent')\n\n response = requests.request(\"GET\", endpoint, headers={\n 'X-Auth-Token': self._api_key, 'Accept': 'application/json'})\n\n _logger.debug('QUARANTINECONTINENT GET Endpoint: %s. Response: %s:%s' % (\n endpoint, response.status_code, response.text))\n\n if response.status_code == requests.codes.unauthorized:\n dto = model.QuarantineContinentResponse(\n status_code=response.status_code, error='Unauthorized.')\n return dto\n\n if response.status_code == requests.codes.ok:\n json_dump = response.json()\n quarantine = json_dump['quarantined']\n dto = model.QuarantineContinentResponse(quarantine=quarantine, json=json_dump)\n return dto\n\n return model.QuarantineContinentResponse(status_code=response.status_code, error=response.text)\n\n def GetQuarantineAS(self):\n \"\"\"Get the list of Autonomous Systems in the quarantine. Quarantine is a private exclusion list based on user IP address properties. In this case, the AS the IP belongs to.\n\n Returns:\n - :func:`~apilityio.model.QuarantineASResponse`: an object containing the HTTP status code response, the error (if any) and the object containing all the AS in the quarantine.\n\n \"\"\"\n\n endpoint = '%s/%s' % (self._GetURL(), 'quarantine/as')\n\n response = requests.request(\"GET\", endpoint, headers={\n 'X-Auth-Token': self._api_key, 'Accept': 'application/json'})\n\n _logger.debug('QUARANTINEAS GET Endpoint: %s. Response: %s:%s' %\n (endpoint, response.status_code, response.text))\n\n if response.status_code == requests.codes.unauthorized:\n dto = model.QuarantineASResponse(\n status_code=response.status_code, error='Unauthorized.')\n return dto\n\n if response.status_code == requests.codes.ok:\n json_dump = response.json()\n quarantine = json_dump['quarantined']\n dto = model.QuarantineASResponse(quarantine=quarantine, json=json_dump)\n return dto\n\n return model.QuarantineASResponse(status_code=response.status_code, error=response.text)\n\n
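 # Editor's note: the private helper below POSTs a JSON payload of the form\n # {\"<object_type>\": <value>, \"ttl\": <seconds>}, e.g. {\"ip\": \"1.2.3.4\", \"ttl\": 3600},\n # which the public AddQuarantine* wrappers build for each object type.\n\n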
 def _AddQuarantineObject(self, object_type, object_value, ttl, object_uri_type=None):\n payload = {\"%s\" % object_type: object_value, \"ttl\": ttl}\n if object_uri_type is not None:\n object_type = object_uri_type\n endpoint = '%s/%s' % (self._GetURL(), 'quarantine/%s' % object_type)\n _logger.debug(payload)\n response = requests.request(\"POST\", endpoint, json=payload,\n headers={'X-Auth-Token': self._api_key, 'Accept': 'application/json'})\n _logger.debug('QUARANTINE%s POST Endpoint: %s. Response: %s:%s' % (\n object_type.upper(), endpoint, response.status_code, response.text))\n if response.status_code == requests.codes.unauthorized:\n dto = model.Response(\n status_code=response.status_code, error='Unauthorized.')\n return dto\n if response.status_code == requests.codes.ok:\n dto = model.Response(status_code=response.status_code, error='OK.')\n return dto\n return model.Response(status_code=response.status_code, error=response.text)\n\n def AddQuarantineIP(self, ip_address, ttl=3600):\n \"\"\"Add an IP address for a given time to live in the quarantine list.\n\n Arguments:\n - ``ip_address``: A string containing a valid IP address to include in the QUARANTINE-IP list.\n - ``ttl``: (Optional) An integer in seconds to limit the time to live of the IP address in the list. The default is 3600 seconds. A zero value means the IP address will never expire from the list.\n\n Returns:\n - :func:`~apilityio.model.Response`: an object containing the HTTP status code response and the error (if any).\n\n Raises:\n - :func:`~apilityio.errors.ApilityioValueError`: If the provided arguments are not a valid IP address or TTL value.\n \"\"\"\n\n self._ValidateIP(ip_address)\n self._ValidateTTL(ttl)\n object_type = 'ip'\n\n return self._AddQuarantineObject(object_type, ip_address, ttl)\n\n def AddQuarantineCountry(self, country, ttl=3600):\n \"\"\"Add a country for a given time to live in the quarantine list.\n\n Arguments:\n - ``country``: A string containing a valid ISO-3166-1 country code to include in the QUARANTINE-COUNTRY list.\n - ``ttl``: (Optional) An integer in seconds to limit the time to live of the country in the list. The default is 3600 seconds. A zero value means the country will never expire from the list.\n\n Returns:\n - :func:`~apilityio.model.Response`: an object containing the HTTP status code response and the error (if any).\n\n Raises:\n - :func:`~apilityio.errors.ApilityioValueError`: If the provided arguments are not a valid country code or TTL value.\n \"\"\"\n\n self._ValidateCountry(country)\n self._ValidateTTL(ttl)\n object_type = 'country'\n\n return self._AddQuarantineObject(object_type, country, ttl)\n\n def AddQuarantineContinent(self, continent, ttl=3600):\n \"\"\"Add a continent for a given time to live in the quarantine list.\n\n Arguments:\n - ``continent``: A string containing a valid continent code to include in the QUARANTINE-CONTINENT list. Valid codes are EU, AS, NA, AF, AN, SA, OC.\n - ``ttl``: (Optional) An integer in seconds to limit the time to live of the continent in the list. The default is 3600 seconds. A zero value means the continent will never expire from the list.\n\n Returns:\n - :func:`~apilityio.model.Response`: an object containing the HTTP status code response and the error (if any).\n\n Raises:\n - :func:`~apilityio.errors.ApilityioValueError`: If the provided arguments are not a valid continent code or TTL value.\n \"\"\"\n\n self._ValidateContinent(continent)\n self._ValidateTTL(ttl)\n object_type = 'continent'\n\n return self._AddQuarantineObject(object_type, continent, ttl)\n\n def AddQuarantineAS(self, asnum, ttl=3600):\n \"\"\"Add an Autonomous System number for a given time to live in the quarantine list.\n\n Arguments:\n - ``asnum``: An integer containing a valid Autonomous System Number (ASN) to include in the QUARANTINE-AS list.\n - ``ttl``: (Optional) An integer in seconds to limit the time to live of the AS in the list. The default is 3600 seconds. 
A zero value means the AS will never expire from the list.\n\n Returns:\n - :func:`~apilityio.model.Response`: an object containing the HTTP status code response and the error (if any).\n\n Raises:\n - :func:`~apilityio.errors.ApilityioValueError`: If the provided arguments are not a valid ASN or TTL value.\n \"\"\"\n\n self._ValidateASNum(asnum)\n self._ValidateTTL(ttl)\n object_type = 'asn'\n\n return self._AddQuarantineObject(object_type, asnum, ttl, object_uri_type='as')\n\n def _DeleteQuarantineObject(self, object_type, object_value):\n endpoint = '%s/%s/%s' % (self._GetURL(),\n 'quarantine/%s' % object_type, object_value)\n response = requests.request(\"DELETE\", endpoint, headers={\n 'X-Auth-Token': self._api_key})\n _logger.debug('QUARANTINE%s DELETE Endpoint: %s. Response: %s:%s' % (\n object_type.upper(), endpoint, response.status_code, response.text))\n if response.status_code == requests.codes.unauthorized:\n dto = model.Response(\n status_code=response.status_code, error='Unauthorized.')\n return dto\n if response.status_code == requests.codes.ok:\n dto = model.Response(status_code=response.status_code, error='OK.')\n return dto\n return model.Response(status_code=response.status_code, error=response.text)\n\n def DeleteQuarantineIP(self, ip_address):\n \"\"\"Delete an IP address from the quarantine list.\n\n Arguments:\n - ``ip_address``: A string containing a valid IP address to remove from the QUARANTINE-IP list.\n\n Returns:\n - :func:`~apilityio.model.Response`: an object containing the HTTP status code response and the error (if any). If the IP address does not exist, it will also return a 200 status code.\n\n Raises:\n - :func:`~apilityio.errors.ApilityioValueError`: If the provided argument is not a valid IP address.\n \"\"\"\n\n self._ValidateIP(ip_address)\n object_type = 'ip'\n\n return self._DeleteQuarantineObject(object_type, ip_address)\n\n def DeleteQuarantineCountry(self, country):\n \"\"\"Delete a country from the quarantine list.\n\n Arguments:\n - ``country``: A string containing a valid ISO-3166-1 country code to remove from the QUARANTINE-COUNTRY list.\n\n Returns:\n - :func:`~apilityio.model.Response`: an object containing the HTTP status code response and the error (if any). If the country does not exist, it will also return a 200 status code.\n\n Raises:\n - :func:`~apilityio.errors.ApilityioValueError`: If the provided argument is not a valid country code.\n \"\"\"\n\n self._ValidateCountry(country)\n object_type = 'country'\n\n return self._DeleteQuarantineObject(object_type, country)\n\n
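 # Editor's sketch (hypothetical values): a quarantine round-trip, e.g.\n # >>> client.AddQuarantineIP('1.2.3.4', ttl=600) # model.Response, error 'OK.' on 200\n # >>> client.DeleteQuarantineIP('1.2.3.4') # also returns 200 if the entry is absent\n\n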
 def DeleteQuarantineContinent(self, continent):\n \"\"\"Delete a continent from the quarantine list.\n\n Arguments:\n - ``continent``: A string containing a valid continent code to remove from the QUARANTINE-CONTINENT list. Valid codes are EU, AS, NA, AF, AN, SA, OC.\n\n Returns:\n - :func:`~apilityio.model.Response`: an object containing the HTTP status code response and the error (if any). If the continent does not exist, it will also return a 200 status code.\n\n Raises:\n - :func:`~apilityio.errors.ApilityioValueError`: If the provided argument is not a valid continent code.\n \"\"\"\n\n self._ValidateContinent(continent)\n object_type = 'continent'\n\n return self._DeleteQuarantineObject(object_type, continent)\n\n def DeleteQuarantineAS(self, asn):\n \"\"\"Delete an Autonomous System from the quarantine list.\n\n Arguments:\n - ``asn``: An integer containing a valid Autonomous System Number (ASN) to remove from the QUARANTINE-AS list.\n\n Returns:\n - :func:`~apilityio.model.Response`: an object containing the HTTP status code response and the error (if any). If the ASN does not exist, it will also return a 200 status code.\n\n Raises:\n - :func:`~apilityio.errors.ApilityioValueError`: If the provided argument is not a valid ASN.\n \"\"\"\n\n self._ValidateASNum(asn)\n object_type = 'as'\n\n return self._DeleteQuarantineObject(object_type, asn)\n" }, { "alpha_fraction": 0.602150559425354, "alphanum_fraction": 0.602150559425354, "avg_line_length": 12.428571701049805, "blob_id": "d55a86d363468f4e6488e674536e1cc6fdcd0785", "content_id": "106a99289f8bc9b824496581187041c3abf0bd4e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 93, "license_type": "permissive", "max_line_length": 32, "num_lines": 7, "path": "/docs/apireference.rst", "repo_name": "willbengtson/python-sdk", "src_encoding": "UTF-8", "text": ".. _apireference:\n\nAPI Reference\n=============\n\n.. automodule:: apilityio.client\n :members:" }, { "alpha_fraction": 0.6380830407142639, "alphanum_fraction": 0.6578717231750488, "avg_line_length": 41.448570251464844, "blob_id": "e9e393a90e819d74a5806cbfd1e58e93f8e9df45", "content_id": "81ed6ca2d102b09add34186efbff354ec04dddcc", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14857, "license_type": "permissive", "max_line_length": 90, "num_lines": 350, "path": "/tests/client_test_anonymous.py", "repo_name": "willbengtson/python-sdk", "src_encoding": "UTF-8", "text": "\"\"\"\nCopyright 2017-2018 CAPITAL LAB OU\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport unittest\nimport requests\nimport time\n\nimport apilityio.client as client\n\n\nclass ClientTestCase(unittest.TestCase):\n\n def setUp(self):\n # This is a test\n x = 1\n\n def tearDown(self):\n # This is another test\n x = 2\n\n def testCheckGoodIPAddressConnectionAnonymous(self):\n ip_sample = '8.8.8.8'\n connection = client.Client()\n dto = connection.CheckIP(ip_sample)\n self.assertEqual(dto.status_code, requests.codes.not_found)\n self.assertEqual(dto.error, None)\n self.assertEqual(dto.blacklists, [])\n time.sleep(10)\n\n def testCheckBadIPAddressConnectionAnonymous(self):\n ip_sample = '1.2.3.4'\n connection = client.Client()\n dto = connection.CheckIP(ip_sample)\n self.assertEqual(dto.status_code, requests.codes.ok)\n self.assertEqual(dto.error, None)\n 
self.assertNotEqual(dto.blacklists, [])\n time.sleep(10)\n\n def testCheckGoodBatchIPAddressesConnectionAnonymous(self):\n ip_sample = ['8.8.8.8', '9.9.9.9', '8.8.4.4']\n connection = client.Client()\n dto = connection.CheckBatchIP(ip_sample)\n self.assertEqual(dto.status_code, requests.codes.ok)\n self.assertEqual(dto.error, None)\n self.assertNotEqual(dto.ipblacklists_set, None)\n time.sleep(10)\n\n def testCheckBadBatchIPAddressesConnectionAnonymous(self):\n ip_sample = ['1.2.3.4', '114.223.63.139', '114.224.29.97']\n connection = client.Client()\n dto = connection.CheckBatchIP(ip_sample)\n self.assertEqual(dto.status_code, requests.codes.ok)\n self.assertEqual(dto.error, None)\n self.assertNotEqual(dto.ipblacklists_set, None)\n time.sleep(10)\n\n def testCheckBadBatchIPAddressesWrongFormatConnectionAnonymous(self):\n ip_sample = ['1.2.3.4', 'abcdef', 'mdmdmdmdm']\n connection = client.Client()\n try:\n dto = connection.CheckBatchIP(ip_sample)\n self.assertEqual(\n 1, 0, 'Wrongly formatted values should return an error.')\n except:\n self.assertEqual(\n 1, 1, 'Wrongly formatted values interrupted execution.')\n time.sleep(10)\n\n def testGeoIPAddressConnectionAnonymous(self):\n ip_sample = '8.8.8.8'\n connection = client.Client()\n dto = connection.GetGeoIP(ip_sample)\n self.assertEqual(dto.status_code, requests.codes.ok)\n self.assertEqual(dto.error, None)\n self.assertEqual(dto.geoip.address, '8.8.8.8')\n self.assertEqual(dto.geoip.asystem.asn, '15169')\n time.sleep(10)\n\n def testGeoPrivateIPAddressConnectionAnonymous(self):\n ip_sample = '10.0.0.1'\n connection = client.Client()\n dto = connection.GetGeoIP(ip_sample)\n self.assertEqual(dto.status_code, requests.codes.ok)\n self.assertEqual(dto.error, None)\n self.assertEqual(dto.geoip, None)\n time.sleep(10)\n\n def testGeoBatchIPAddressesConnectionAnonymous(self):\n ip_sample = ['8.8.8.8', '9.9.9.9', '8.8.4.4']\n connection = client.Client()\n dto = connection.GetGeoBatchIP(ip_sample)\n self.assertEqual(dto.status_code, requests.codes.ok)\n self.assertEqual(dto.error, None)\n self.assertEqual(\n dto.geolocated_ip_list[0].geoip.address, dto.geolocated_ip_list[0].ip_address)\n self.assertEqual(\n dto.geolocated_ip_list[1].geoip.address, dto.geolocated_ip_list[1].ip_address)\n self.assertEqual(\n dto.geolocated_ip_list[2].geoip.address, dto.geolocated_ip_list[2].ip_address)\n time.sleep(10)\n\n def testCheckGoodDomainConnectionAnonymous(self):\n domain_sample = 'google.com'\n connection = client.Client()\n dto = connection.CheckDomain(domain_sample)\n self.assertEqual(dto.status_code, requests.codes.ok)\n self.assertEqual(dto.error, None)\n self.assertEqual(dto.response.score, 0)\n self.assertEqual('ns1.google.com' in dto.response.domain.ns, True)\n self.assertEqual('aspmx.l.google.com' in dto.response.domain.mx, True)\n time.sleep(10)\n\n def testCheckBadDomainConnectionAnonymous(self):\n domain_sample = 'mailinator.com'\n connection = client.Client()\n dto = connection.CheckDomain(domain_sample)\n self.assertEqual(dto.status_code, requests.codes.ok)\n self.assertEqual(dto.error, None)\n self.assertNotEqual(dto.response.score, 0)\n self.assertEqual(\n 'betty.ns.cloudflare.com' in dto.response.domain.ns, True)\n self.assertEqual('mail.mailinator.com' in dto.response.domain.mx, True)\n self.assertEqual('DEA' in dto.response.domain.blacklist_mx, True)\n self.assertEqual('IVOLO-DED-IP' in dto.response.ip.blacklist, True)\n time.sleep(10)\n\n
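 # Editor's note: the recurring time.sleep(10) calls presumably keep the anonymous\n # test run inside the unauthenticated rate limits of the public API.\n\n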
 def testCheckGoodBatchDomainConnectionAnonymous(self):\n domain_sample = ['google.com', 'marca.com', 'facebook.com']\n connection = client.Client()\n dto = connection.CheckBatchDomain(domain_sample)\n self.assertEqual(dto.status_code, requests.codes.ok)\n self.assertEqual(dto.error, None)\n self.assertEqual(dto.domain_scoring_list[0].domain, 'google.com')\n self.assertEqual(dto.domain_scoring_list[1].domain, 'marca.com')\n self.assertEqual(dto.domain_scoring_list[2].domain, 'facebook.com')\n self.assertEqual(dto.domain_scoring_list[0].scoring.score, 0)\n self.assertEqual(dto.domain_scoring_list[1].scoring.score, 0)\n self.assertEqual(dto.domain_scoring_list[2].scoring.score, 0)\n time.sleep(10)\n\n def testCheckBadBatchDomainConnectionAnonymous(self):\n domain_sample = ['loketa.com', 'mailinator.com', 'zixoa.com']\n connection = client.Client()\n dto = connection.CheckBatchDomain(domain_sample)\n self.assertEqual(dto.status_code, requests.codes.ok)\n self.assertEqual(dto.error, None)\n self.assertEqual(dto.domain_scoring_list[0].domain, 'loketa.com')\n self.assertEqual(dto.domain_scoring_list[1].domain, 'zixoa.com')\n self.assertEqual(dto.domain_scoring_list[2].domain, 'mailinator.com')\n self.assertNotEqual(dto.domain_scoring_list[0].scoring.score, 0)\n self.assertNotEqual(dto.domain_scoring_list[1].scoring.score, 0)\n self.assertNotEqual(dto.domain_scoring_list[2].scoring.score, 0)\n time.sleep(10)\n\n def testCheckGoodEmailConnectionAnonymous(self):\n email_sample = 'devops@apility.io'\n connection = client.Client()\n dto = connection.CheckEmail(email_sample)\n self.assertEqual(dto.status_code, requests.codes.ok)\n self.assertEqual(dto.error, None)\n self.assertEqual(dto.response.score, 0)\n self.assertEqual(\n 'pam.ns.cloudflare.com' in dto.response.domain.ns, True)\n self.assertEqual('aspmx.l.google.com' in dto.response.domain.mx, True)\n self.assertEqual(dto.response.disposable.is_disposable, False)\n self.assertEqual(dto.response.freemail.is_freemail, False)\n self.assertEqual(dto.response.address.is_role, False)\n self.assertEqual(dto.response.address.is_well_formed, True)\n self.assertEqual(dto.response.smtp.exist_address, True)\n time.sleep(10)\n\n def testCheckBadEmailConnectionAnonymous(self):\n email_sample = 'test@mailinator.com'\n connection = client.Client()\n dto = connection.CheckEmail(email_sample)\n self.assertEqual(dto.status_code, requests.codes.ok)\n self.assertEqual(dto.error, None)\n self.assertNotEqual(dto.response.score, 0)\n self.assertEqual(\n 'betty.ns.cloudflare.com' in dto.response.domain.ns, True)\n self.assertEqual('mail.mailinator.com' in dto.response.domain.mx, True)\n self.assertEqual('DEA' in dto.response.domain.blacklist_mx, True)\n self.assertEqual('IVOLO-DED-IP' in dto.response.ip.blacklist, True)\n self.assertEqual(dto.response.disposable.is_disposable, True)\n self.assertEqual(dto.response.address.is_role, False)\n self.assertEqual(dto.response.address.is_well_formed, True)\n self.assertEqual(dto.response.smtp.exist_address, True)\n time.sleep(10)\n\n def testCheckGoodBatchEmailConnectionAnonymous(self):\n email_sample = ['devops@apility.io']\n connection = client.Client()\n dto = connection.CheckBatchEmail(email_sample)\n self.assertEqual(dto.status_code, requests.codes.ok)\n self.assertEqual(dto.error, None)\n self.assertEqual(dto.email_scoring_list[0].email, 'devops@apility.io')\n self.assertEqual(dto.email_scoring_list[0].scoring.score, 0)\n self.assertEqual(\n dto.email_scoring_list[0].scoring.disposable.is_disposable, False)\n self.assertEqual(\n dto.email_scoring_list[0].scoring.freemail.is_freemail, False)\n self.assertEqual(\n 
dto.email_scoring_list[0].scoring.address.is_role, False)\n self.assertEqual(\n dto.email_scoring_list[0].scoring.address.is_well_formed, True)\n self.assertEqual(\n dto.email_scoring_list[0].scoring.smtp.exist_address, True)\n time.sleep(10)\n\n def testCheckBadBatchEmailConnectionAnonymous(self):\n email_sample = ['test@mailinator.com']\n connection = client.Client()\n dto = connection.CheckBatchEmail(email_sample)\n self.assertEqual(dto.status_code, requests.codes.ok)\n self.assertEqual(dto.error, None)\n self.assertEqual(\n dto.email_scoring_list[0].email, 'test@mailinator.com')\n self.assertNotEqual(dto.email_scoring_list[0].scoring.score, 0)\n self.assertEqual(\n dto.email_scoring_list[0].scoring.disposable.is_disposable, True)\n self.assertEqual(\n dto.email_scoring_list[0].scoring.freemail.is_freemail, False)\n self.assertEqual(\n dto.email_scoring_list[0].scoring.address.is_role, False)\n self.assertEqual(\n dto.email_scoring_list[0].scoring.address.is_well_formed, True)\n self.assertEqual(\n dto.email_scoring_list[0].scoring.smtp.exist_address, True)\n time.sleep(10)\n\n def testASIPAddressConnectionAnonymous(self):\n ip_sample = '8.8.8.8'\n connection = client.Client()\n dto = connection.GetASbyIP(ip_sample)\n self.assertEqual(dto.status_code, requests.codes.ok)\n self.assertEqual(dto.error, None)\n self.assertEqual(dto.asystem.name, 'Google LLC')\n self.assertEqual(dto.asystem.asn, '15169')\n time.sleep(10)\n\n def testASPrivateIPAddressConnectionAnonymous(self):\n ip_sample = '10.0.0.1'\n connection = client.Client()\n dto = connection.GetASbyIP(ip_sample)\n self.assertEqual(dto.status_code, requests.codes.not_found)\n self.assertEqual(dto.asystem, None)\n time.sleep(10)\n\n def testASNumConnectionAnonymous(self):\n asnum_sample = 15169\n connection = client.Client()\n dto = connection.GetASbyNum(asnum_sample)\n self.assertEqual(dto.status_code, requests.codes.ok)\n self.assertEqual(dto.error, None)\n self.assertEqual(dto.asystem.name, 'Google LLC')\n self.assertEqual(dto.asystem.asn, '15169')\n time.sleep(10)\n\n def testBadNumASNumConnectionAnonymous(self):\n try:\n asnum_sample = -300\n connection = client.Client()\n dto = connection.GetASbyNum(asnum_sample)\n self.assertEqual(\n 1, 0, 'Wrongly formatted values should return an error.')\n except:\n self.assertEqual(\n 1, 1, 'Wrongly formatted values interrupted execution.')\n time.sleep(10)\n\n def testBadStringASNumConnectionAnonymous(self):\n try:\n asnum_sample = \"abcdce\"\n connection = client.Client()\n dto = connection.GetASbyNum(asnum_sample)\n self.assertEqual(\n 1, 0, 'Wrongly formatted values should return an error.')\n except:\n self.assertEqual(\n 1, 1, 'Wrongly formatted values interrupted execution.')\n time.sleep(10)\n\n def testASBatchIPAddressesConnectionAnonymous(self):\n ip_sample = ['8.8.8.8', '9.9.9.9', '8.8.4.4']\n connection = client.Client()\n dto = connection.GetASBatchByIP(ip_sample)\n self.assertEqual(dto.status_code, requests.codes.ok)\n self.assertEqual(dto.error, None)\n self.assertEqual(dto.asystem_ip_list[0].asystem.asn, '15169')\n self.assertEqual(dto.asystem_ip_list[1].asystem.asn, '19281')\n self.assertEqual(dto.asystem_ip_list[2].asystem.asn, '15169')\n time.sleep(10)\n\n def testASBatchNumConnectionAnonymous(self):\n asn_sample = [15169, 19281]\n connection = client.Client()\n dto = connection.GetASBatchByNum(asn_sample)\n self.assertEqual(dto.status_code, requests.codes.ok)\n self.assertEqual(dto.error, None)\n self.assertEqual(dto.asystem_asn_list[0].asystem.asn, '15169')\n 
self.assertEqual(dto.asystem_asn_list[1].asystem.asn, '19281')\n time.sleep(10)\n\n def testWhoisIPAddressConnectionAnonymous(self):\n ip_sample = '8.8.8.8'\n connection = client.Client()\n dto = connection.GetWhoisIP(ip_sample)\n self.assertEqual(dto.status_code, requests.codes.ok)\n self.assertEqual(dto.error, None)\n self.assertEqual(dto.whois.entities[0], 'GOGL')\n self.assertEqual(dto.whois.asn, '15169')\n time.sleep(10)\n\n def testHistoryIPAddressConnectionAnonymous(self):\n ip_sample = '1.2.3.4'\n connection = client.Client()\n dto = connection.GetHistoryIP(ip_sample)\n self.assertEqual(dto.status_code, requests.codes.unauthorized)\n time.sleep(10)\n\n def testHistoryDomainConnectionAnonymous(self):\n domain = 'mailinator.com'\n connection = client.Client()\n dto = connection.GetHistoryDomain(domain)\n self.assertEqual(dto.status_code, requests.codes.unauthorized)\n time.sleep(10)\n\n def testHistoryEmailConnectionAnonymous(self):\n email = 'test@mailinator.com'\n connection = client.Client()\n dto = connection.GetHistoryEmail(email)\n self.assertEqual(dto.status_code, requests.codes.unauthorized)\n time.sleep(10)\n\n\n# if __name__ == '__main__':\n# unittest.main()\n" }, { "alpha_fraction": 0.693217396736145, "alphanum_fraction": 0.7088695764541626, "avg_line_length": 32.034481048583984, "blob_id": "34627c8b82ae76bc9027ff5084e203aea1a0d79d", "content_id": "3b573be0d1f965fabddff128f70f6dacb719093c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 2875, "license_type": "permissive", "max_line_length": 285, "num_lines": 87, "path": "/docs/quickstart.rst", "repo_name": "willbengtson/python-sdk", "src_encoding": "UTF-8", "text": ".. _quickstart:\n\nQuickstart\n==========\n\nAPI Documentation\n-----------------\nYou can read the Python documentation in :ref:`apireference`.\n\n\nFull Examples\n-------------\nIf you would like to obtain example code for any of the included client libraries, you can find it in our **examples** folder on our GitHub pages.\n\n\nImport the library\n------------------\nThe developer has to import the library **apilityio**.\n\n.. code-block:: python\n\n import apilityio\n\n\nImport external libraries\n-------------------------\nIf the developer has already installed the library with **pip** he/she does not need to worry about the dependencies, but the libraries needed are:\n\n- requests>=2.0.0,<3.0.0\n- validators>=0.12.2,<1.0.0\n\nIf you are running Python 2.7.x, then you also need these libraries:\n\n- py2-ipaddress<=3.4.1;python_version<\"3.4\"\n\n\nCreate the client object\n------------------------\nThe developer has to instantiate an object first, passing as argument the API_KEY obtained after registering at https://apility.io.\n\nIt's possible to use the API without an API key, but it will be restricted to only 100 hits per day. The Trial plan offers 1000 hits per day for 30 days, and the Free plan has 250 hits per day forever. Please read our pricing plans at https://apility.io/pricing.\n\nTo instantiate the client class with an API key:\n\n.. code-block:: python\n\n client = apilityio.Client(api_key=api_key)\n\n\nTo instantiate the client class without an API key:\n\n.. code-block:: python\n\n client = apilityio.Client()\n\n\nExecute API calls\n-----------------\n\nNow it's time to perform the API calls. For example, to look up an IP address in Apility.io databases of blacklists:\n\n.. 
code-block:: python\n\n response = client.CheckIP(ip)\n\n\nIf the IP address has not been found in any blacklist, it will return a 404 code in the `status_code` attribute of the `Response` object:\n\n.. code-block:: python\n\n if response.status_code == 404:\n print(\"Congratulations! The IP address has not been found in any blacklist.\")\n\nIf the IP address has been found in one or more blacklists, it will return a 200 code in the `status_code` attribute of the `Response` object, and the lists of blacklists in the `blacklists` attribute:\n\n.. code-block:: python\n\n if response.status_code == 200:\n print(\"Ooops! The IP address has been found in one or more blacklists\")\n blacklists = response.blacklists\n print('+- Blacklists: %s' % blacklists)\n\nNow the developer can perform as many requests as needed with this client object. And he/she doesn't need to close the connection because it is stateless.\n\nWhat's next\n-----------\nThe developer can start using the API right away, even without registering with the service! If you have any questions you can visit the website at https://apility.io, review the REST API specification at https://apility.io/apidocs and also read the User Guide at https://apility.io/docs.\n\n" }, { "alpha_fraction": 0.653419017791748, "alphanum_fraction": 0.6701545119285583, "avg_line_length": 46.9955940246582, "blob_id": "6cada52f6b3dab562a19f81d5c757e59b969193c", "content_id": "2b2ddc604c04cffe91e566cf4813c625b4b44a51", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 32685, "license_type": "permissive", "max_line_length": 94, "num_lines": 681, "path": "/tests/client_test.py", "repo_name": "willbengtson/python-sdk", "src_encoding": "UTF-8", "text": "\"\"\"\nCopyright 2017-2018 CAPITAL LAB OU\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport unittest\nimport xmlrunner\nimport requests\nimport time\nimport os\n\nimport apilityio.client as client\nimport apilityio.common as common\n\nTEST_WRONG_KEY_SAMPLE = '123dcfe6-63d3-3cd2-b427-75d1b1c117ed'\nMAX_ITERATIONS = 50\n\n# To test the API, you have to pass a valid API KEY as an exported environment variable first:\n# export APILITYIO_API_KEY=<YOUR_API_KEY>\n# You can get an API KEY for free by registering at https://apility.io\n\nTEST_KEY_SAMPLE = os.environ['APILITYIO_API_KEY']\n\n\nclass ClientTestCase(unittest.TestCase):\n\n def testConnectionWithoutParameters(self):\n connection = client.Client()\n api_key_test, protocol_test, host_test = connection.GetConnectionData()\n self.assertEqual(api_key_test, None)\n self.assertEqual(protocol_test, common.HTTPS_PROTOCOL)\n self.assertEqual(host_test, common.DEFAULT_HOST)\n\n def testConnectionWithAPIKEYParameters(self):\n api_key_sample = TEST_WRONG_KEY_SAMPLE\n connection = client.Client(api_key=api_key_sample)\n api_key_test, protocol_test, host_test = connection.GetConnectionData()\n self.assertEqual(api_key_test, api_key_sample)\n self.assertEqual(protocol_test, common.HTTPS_PROTOCOL)\n self.assertEqual(host_test, 
common.DEFAULT_HOST)\n\n def testConnectionWithCustomProtocolParameters(self):\n connection = client.Client(protocol=common.HTTPS_PROTOCOL)\n api_key_test, protocol_test, host_test = connection.GetConnectionData()\n self.assertEqual(api_key_test, None)\n self.assertEqual(protocol_test, common.HTTPS_PROTOCOL)\n self.assertEqual(host_test, common.DEFAULT_HOST)\n\n connection = client.Client(protocol=common.HTTP_PROTOCOL)\n api_key_test, protocol_test, host_test = connection.GetConnectionData()\n self.assertEqual(api_key_test, None)\n self.assertEqual(protocol_test, common.HTTP_PROTOCOL)\n self.assertEqual(host_test, common.DEFAULT_HOST)\n\n def testConnectionWithCustomBadProtocolParameters(self):\n protocol_sample = 'TCP'\n try:\n connection = client.Client(protocol=protocol_sample)\n api_key_test, protocol_test, host_test = connection.GetConnectionData()\n self.assertEqual(protocol_sample, common.HTTPS_PROTOCOL)\n except:\n self.assertNotEqual(protocol_sample, common.HTTPS_PROTOCOL)\n\n def testConnectionWithCustomHostParameters(self):\n host_sample = 'google.com'\n connection = client.Client(host=host_sample)\n api_key_test, protocol_test, host_test = connection.GetConnectionData()\n self.assertEqual(api_key_test, None)\n self.assertEqual(protocol_test, common.HTTPS_PROTOCOL)\n self.assertEqual(host_test, host_sample)\n\n def testCheckBadIPAddressConnectionApiKey(self):\n api_key_sample = TEST_KEY_SAMPLE\n ip_sample = '1.2.3.4'\n connection = client.Client(api_key=api_key_sample)\n dto = connection.CheckIP(ip_sample)\n self.assertEqual(dto.status_code, requests.codes.ok)\n self.assertEqual(dto.error, None)\n self.assertNotEqual(dto.blacklists, [])\n\n def testCheckBadIPAddressConnectionApiKeyJSON(self):\n api_key_sample = TEST_KEY_SAMPLE\n ip_sample = '1.2.3.4'\n connection = client.Client(api_key=api_key_sample)\n dto = connection.CheckIP(ip_sample)\n self.assertEqual(dto.status_code, requests.codes.ok)\n self.assertEqual(dto.error, None)\n self.assertIn('QUARANTINE-IP', dto.json['response'])\n\n def testCheckBadIPAddressConnectionWrongApiKey(self):\n api_key_sample = TEST_WRONG_KEY_SAMPLE\n ip_sample = '1.2.3.4'\n connection = client.Client(api_key=api_key_sample)\n dto = connection.CheckIP(ip_sample)\n self.assertEqual(dto.status_code, requests.codes.bad_request)\n\n def testCheckBadBatchIPAddressConnectionApiKeyJSON(self):\n api_key_sample = TEST_KEY_SAMPLE\n ip_sample = ['1.2.3.4', '114.223.63.139', '114.224.29.97']\n connection = client.Client(api_key=api_key_sample)\n dto = connection.CheckBatchIP(ip_sample)\n self.assertEqual(dto.status_code, requests.codes.ok)\n self.assertEqual(dto.error, None)\n self.assertNotEqual(dto.ipblacklists_set, None)\n self.assertEqual(3, len(dto.json['response']))\n\n def testCheckBadBatchIPAddressConnectionApiKey(self):\n api_key_sample = TEST_KEY_SAMPLE\n ip_sample = ['1.2.3.4', '114.223.63.139', '114.224.29.97']\n connection = client.Client(api_key=api_key_sample)\n dto = connection.CheckBatchIP(ip_sample)\n self.assertEqual(dto.status_code, requests.codes.ok)\n self.assertEqual(dto.error, None)\n self.assertNotEqual(dto.ipblacklists_set, None)\n\n def testCheckBadBatchIPAddressConnectionWrongApiKey(self):\n api_key_sample = TEST_WRONG_KEY_SAMPLE\n ip_sample = ['1.2.3.4', '114.223.63.139', '114.224.29.97']\n connection = client.Client(api_key=api_key_sample)\n dto = connection.CheckBatchIP(ip_sample)\n self.assertEqual(dto.status_code, requests.codes.bad_request)\n\n def testGeoIPAddressConnectionApiKey(self):\n api_key_sample = TEST_KEY_SAMPLE\n 
ip_sample = '8.8.8.8'\n connection = client.Client(api_key=api_key_sample)\n dto = connection.GetGeoIP(ip_sample)\n self.assertEqual(dto.status_code, requests.codes.ok)\n self.assertEqual(dto.error, None)\n self.assertEqual(dto.geoip.address, '8.8.8.8')\n self.assertEqual(dto.geoip.asystem.asn, '15169')\n\n def testGeoIPAddressConnectionApiKeyJSON(self):\n api_key_sample = TEST_KEY_SAMPLE\n ip_sample = '8.8.8.8'\n connection = client.Client(api_key=api_key_sample)\n dto = connection.GetGeoIP(ip_sample)\n self.assertEqual(dto.status_code, requests.codes.ok)\n self.assertEqual(dto.error, None)\n self.assertEqual(dto.geoip.address, '8.8.8.8')\n self.assertEqual(dto.geoip.asystem.asn, '15169')\n self.assertEqual('US', dto.json['ip']['country'])\n\n def testGeoIPAddressConnectionWrongApiKey(self):\n api_key_sample = TEST_WRONG_KEY_SAMPLE\n ip_sample = '8.8.8.8'\n connection = client.Client(api_key=api_key_sample)\n dto = connection.GetGeoIP(ip_sample)\n self.assertEqual(dto.status_code, requests.codes.bad_request)\n\n def testGeoBatchIPAddressesConnectionApiKey(self):\n api_key_sample = TEST_KEY_SAMPLE\n ip_sample = ['8.8.8.8', '9.9.9.9', '8.8.4.4']\n connection = client.Client(api_key=api_key_sample)\n dto = connection.GetGeoBatchIP(ip_sample)\n self.assertEqual(dto.status_code, requests.codes.ok)\n self.assertEqual(dto.error, None)\n self.assertEqual(\n dto.geolocated_ip_list[0].geoip.address, dto.geolocated_ip_list[0].ip_address)\n self.assertEqual(\n dto.geolocated_ip_list[1].geoip.address, dto.geolocated_ip_list[1].ip_address)\n self.assertEqual(\n dto.geolocated_ip_list[2].geoip.address, dto.geolocated_ip_list[2].ip_address)\n\n def testGeoBatchIPAddressesConnectionApiKeyJSON(self):\n api_key_sample = TEST_KEY_SAMPLE\n ip_sample = ['8.8.8.8', '9.9.9.9', '8.8.4.4']\n connection = client.Client(api_key=api_key_sample)\n dto = connection.GetGeoBatchIP(ip_sample)\n self.assertEqual(dto.status_code, requests.codes.ok)\n self.assertEqual(dto.error, None)\n self.assertEqual(\n dto.geolocated_ip_list[0].geoip.address, dto.geolocated_ip_list[0].ip_address)\n self.assertEqual(\n dto.geolocated_ip_list[1].geoip.address, dto.geolocated_ip_list[1].ip_address)\n self.assertEqual(\n dto.geolocated_ip_list[2].geoip.address, dto.geolocated_ip_list[2].ip_address)\n self.assertEqual(3, len(dto.json['response']))\n\n def testGeoBatchIPAddressesConnectionWrongApiKey(self):\n api_key_sample = TEST_WRONG_KEY_SAMPLE\n ip_sample = ['8.8.8.8', '9.9.9.9', '8.8.4.4']\n connection = client.Client(api_key=api_key_sample)\n dto = connection.GetGeoBatchIP(ip_sample)\n self.assertEqual(dto.status_code, requests.codes.bad_request)\n\n def testCheckGoodDomainConnectionApiKey(self):\n api_key_sample = TEST_KEY_SAMPLE\n domain_sample = 'google.com'\n connection = client.Client(api_key=api_key_sample)\n dto = connection.CheckDomain(domain_sample)\n self.assertEqual(dto.status_code, requests.codes.ok)\n self.assertEqual(dto.error, None)\n self.assertEqual(dto.response.score, 0)\n self.assertEqual('ns1.google.com' in dto.response.domain.ns, True)\n self.assertEqual('aspmx.l.google.com' in dto.response.domain.mx, True)\n\n def testCheckGoodDomainConnectionWrongApiKey(self):\n api_key_sample = TEST_WRONG_KEY_SAMPLE\n domain_sample = 'google.com'\n connection = client.Client(api_key=api_key_sample)\n dto = connection.CheckDomain(domain_sample)\n self.assertEqual(dto.status_code, requests.codes.bad_request)\n\n def testCheckBadDomainConnectionApiKey(self):\n api_key_sample = TEST_KEY_SAMPLE\n domain_sample = 'mailinator.com'\n 
connection = client.Client(api_key=api_key_sample)\n dto = connection.CheckDomain(domain_sample)\n self.assertEqual(dto.status_code, requests.codes.ok)\n self.assertEqual(dto.error, None)\n self.assertNotEqual(dto.response.score, 0)\n self.assertEqual(\n 'betty.ns.cloudflare.com' in dto.response.domain.ns, True)\n self.assertEqual('mail.mailinator.com' in dto.response.domain.mx, True)\n self.assertEqual('DEA' in dto.response.domain.blacklist_mx, True)\n self.assertEqual('IVOLO-DED-IP' in dto.response.ip.blacklist, True)\n\n def testCheckBadDomainConnectionApiKeyJSON(self):\n api_key_sample = TEST_KEY_SAMPLE\n domain_sample = 'mailinator.com'\n connection = client.Client(api_key=api_key_sample)\n dto = connection.CheckDomain(domain_sample)\n self.assertEqual(dto.status_code, requests.codes.ok)\n self.assertEqual(dto.error, None)\n self.assertNotEqual(dto.response.score, 0)\n self.assertEqual(\n 'betty.ns.cloudflare.com' in dto.response.domain.ns, True)\n self.assertEqual('mail.mailinator.com' in dto.response.domain.mx, True)\n self.assertEqual('DEA' in dto.response.domain.blacklist_mx, True)\n self.assertEqual('IVOLO-DED-IP' in dto.response.ip.blacklist, True)\n self.assertEqual(-2, dto.json['response']['score'])\n\n def testCheckBadDomainConnectionWrongApiKey(self):\n api_key_sample = TEST_WRONG_KEY_SAMPLE\n domain_sample = 'mailinator.com'\n connection = client.Client(api_key=api_key_sample)\n dto = connection.CheckDomain(domain_sample)\n self.assertEqual(dto.status_code, requests.codes.bad_request)\n\n def testCheckGoodBatchDomainConnectionApiKey(self):\n api_key_sample = TEST_KEY_SAMPLE\n domain_sample = ['google.com', 'marca.com', 'facebook.com']\n connection = client.Client(api_key=api_key_sample)\n dto = connection.CheckBatchDomain(domain_sample)\n self.assertEqual(dto.status_code, requests.codes.ok)\n self.assertEqual(dto.error, None)\n self.assertIn(dto.domain_scoring_list[0].domain, [\n 'google.com', 'marca.com', 'facebook.com'])\n self.assertIn(dto.domain_scoring_list[1].domain, [\n 'google.com', 'marca.com', 'facebook.com'])\n self.assertIn(dto.domain_scoring_list[2].domain, [\n 'google.com', 'marca.com', 'facebook.com'])\n self.assertEqual(dto.domain_scoring_list[0].scoring.score, 0)\n self.assertEqual(dto.domain_scoring_list[1].scoring.score, 0)\n self.assertEqual(dto.domain_scoring_list[2].scoring.score, 0)\n\n def testCheckGoodBatchDomainConnectionApiKeyJSON(self):\n api_key_sample = TEST_KEY_SAMPLE\n domain_sample = ['google.com', 'marca.com', 'facebook.com']\n connection = client.Client(api_key=api_key_sample)\n dto = connection.CheckBatchDomain(domain_sample)\n self.assertEqual(dto.status_code, requests.codes.ok)\n self.assertEqual(dto.error, None)\n self.assertIn(dto.domain_scoring_list[0].domain, [\n 'google.com', 'marca.com', 'facebook.com'])\n self.assertIn(dto.domain_scoring_list[1].domain, [\n 'google.com', 'marca.com', 'facebook.com'])\n self.assertIn(dto.domain_scoring_list[2].domain, [\n 'google.com', 'marca.com', 'facebook.com'])\n self.assertEqual(dto.domain_scoring_list[0].scoring.score, 0)\n self.assertEqual(dto.domain_scoring_list[1].scoring.score, 0)\n self.assertEqual(dto.domain_scoring_list[2].scoring.score, 0)\n self.assertEqual(3, len(dto.json['response']))\n\n def testCheckGoodBatchDomainConnectionWrongApiKey(self):\n api_key_sample = TEST_WRONG_KEY_SAMPLE\n domain_sample = ['google.com', 'marca.com', 'facebook.com']\n connection = client.Client(api_key=api_key_sample)\n dto = connection.CheckBatchDomain(domain_sample)\n self.assertEqual(dto.status_code, 
requests.codes.bad_request)\n\n def testCheckBadBatchDomainConnectionApiKey(self):\n api_key_sample = TEST_KEY_SAMPLE\n domain_sample = ['loketa.com', 'mailinator.com', 'zixoa.com']\n connection = client.Client(api_key=api_key_sample)\n dto = connection.CheckBatchDomain(domain_sample)\n self.assertEqual(dto.status_code, requests.codes.ok)\n self.assertEqual(dto.error, None)\n self.assertIn(dto.domain_scoring_list[0].domain, [\n 'loketa.com', 'mailinator.com', 'zixoa.com'])\n self.assertIn(dto.domain_scoring_list[1].domain, [\n 'loketa.com', 'mailinator.com', 'zixoa.com'])\n self.assertIn(dto.domain_scoring_list[2].domain, [\n 'loketa.com', 'mailinator.com', 'zixoa.com'])\n self.assertNotEqual(dto.domain_scoring_list[0].scoring.score, 0)\n self.assertNotEqual(dto.domain_scoring_list[1].scoring.score, 0)\n self.assertNotEqual(dto.domain_scoring_list[2].scoring.score, 0)\n\n def testCheckBadBatchDomainConnectionWrongApiKey(self):\n api_key_sample = TEST_WRONG_KEY_SAMPLE\n domain_sample = ['loketa.com', 'mailinator.com', 'zixoa.com']\n connection = client.Client(api_key=api_key_sample)\n dto = connection.CheckBatchDomain(domain_sample)\n self.assertEqual(dto.status_code, requests.codes.bad_request)\n\n def testCheckGoodEmailConnectionApiKey(self):\n api_key_sample = TEST_KEY_SAMPLE\n email_sample = 'devops@apility.io'\n connection = client.Client(api_key=api_key_sample)\n dto = connection.CheckEmail(email_sample)\n self.assertEqual(dto.status_code, requests.codes.ok)\n self.assertEqual(dto.error, None)\n self.assertEqual(dto.response.score, 0)\n self.assertEqual(\n 'pam.ns.cloudflare.com' in dto.response.domain.ns, True)\n self.assertEqual('aspmx.l.google.com' in dto.response.domain.mx, True)\n self.assertEqual(dto.response.disposable.is_disposable, False)\n self.assertEqual(dto.response.freemail.is_freemail, False)\n self.assertEqual(dto.response.address.is_role, False)\n self.assertEqual(dto.response.address.is_well_formed, True)\n self.assertEqual(dto.response.smtp.exist_address, True)\n\n def testCheckGoodEmailConnectionWrongApiKey(self):\n api_key_sample = TEST_WRONG_KEY_SAMPLE\n email_sample = 'devops@apility.io'\n connection = client.Client(api_key=api_key_sample)\n dto = connection.CheckEmail(email_sample)\n self.assertEqual(dto.status_code, requests.codes.bad_request)\n\n def testCheckBadEmailConnectionApiKey(self):\n api_key_sample = TEST_KEY_SAMPLE\n email_sample = 'test@mailinator.com'\n connection = client.Client(api_key=api_key_sample)\n dto = connection.CheckEmail(email_sample)\n self.assertEqual(dto.status_code, requests.codes.ok)\n self.assertEqual(dto.error, None)\n self.assertNotEqual(dto.response.score, 0)\n self.assertEqual(\n 'betty.ns.cloudflare.com' in dto.response.domain.ns, True)\n self.assertEqual('mail.mailinator.com' in dto.response.domain.mx, True)\n self.assertEqual('DEA' in dto.response.domain.blacklist_mx, True)\n self.assertEqual('IVOLO-DED-IP' in dto.response.ip.blacklist, True)\n self.assertEqual(dto.response.disposable.is_disposable, True)\n self.assertEqual(dto.response.address.is_role, False)\n self.assertEqual(dto.response.address.is_well_formed, True)\n self.assertEqual(dto.response.smtp.exist_address, True)\n\n def testCheckBadEmailConnectionApiKeyJSON(self):\n api_key_sample = TEST_KEY_SAMPLE\n email_sample = 'test@mailinator.com'\n connection = client.Client(api_key=api_key_sample)\n dto = connection.CheckEmail(email_sample)\n self.assertEqual(dto.status_code, requests.codes.ok)\n self.assertEqual(dto.error, None)\n 
self.assertNotEqual(dto.response.score, 0)\n self.assertEqual(\n 'betty.ns.cloudflare.com' in dto.response.domain.ns, True)\n self.assertEqual('mail.mailinator.com' in dto.response.domain.mx, True)\n self.assertEqual('DEA' in dto.response.domain.blacklist_mx, True)\n self.assertEqual('IVOLO-DED-IP' in dto.response.ip.blacklist, True)\n self.assertEqual(dto.response.disposable.is_disposable, True)\n self.assertEqual(dto.response.address.is_role, False)\n self.assertEqual(dto.response.address.is_well_formed, True)\n self.assertEqual(dto.response.smtp.exist_address, True)\n self.assertEqual(-3, dto.json['response']['score'])\n\n def testCheckBadEmailConnectionWrongApiKey(self):\n api_key_sample = TEST_WRONG_KEY_SAMPLE\n email_sample = 'test@mailinator.com'\n connection = client.Client(api_key=api_key_sample)\n dto = connection.CheckEmail(email_sample)\n self.assertEqual(dto.status_code, requests.codes.bad_request)\n\n def testCheckGoodBatchEmailConnectionApiKey(self):\n api_key_sample = TEST_KEY_SAMPLE\n email_sample = ['devops@apility.io']\n connection = client.Client(api_key=api_key_sample)\n dto = connection.CheckBatchEmail(email_sample)\n self.assertEqual(dto.status_code, requests.codes.ok)\n self.assertEqual(dto.error, None)\n self.assertEqual(dto.email_scoring_list[0].email, 'devops@apility.io')\n self.assertEqual(dto.email_scoring_list[0].scoring.score, 0)\n self.assertEqual(\n dto.email_scoring_list[0].scoring.disposable.is_disposable, False)\n self.assertEqual(\n dto.email_scoring_list[0].scoring.freemail.is_freemail, False)\n self.assertEqual(\n dto.email_scoring_list[0].scoring.address.is_role, False)\n self.assertEqual(\n dto.email_scoring_list[0].scoring.address.is_well_formed, True)\n self.assertEqual(\n dto.email_scoring_list[0].scoring.smtp.exist_address, True)\n\n def testCheckGoodBatchEmailConnectionWrongApiKey(self):\n api_key_sample = TEST_WRONG_KEY_SAMPLE\n email_sample = ['devops@apility.io']\n connection = client.Client(api_key=api_key_sample)\n dto = connection.CheckBatchEmail(email_sample)\n self.assertEqual(dto.status_code, requests.codes.bad_request)\n\n def testCheckBadBatchEmailConnectionApiKey(self):\n api_key_sample = TEST_KEY_SAMPLE\n email_sample = ['test@mailinator.com']\n connection = client.Client(api_key=api_key_sample)\n dto = connection.CheckBatchEmail(email_sample)\n self.assertEqual(dto.status_code, requests.codes.ok)\n self.assertEqual(dto.error, None)\n self.assertEqual(\n dto.email_scoring_list[0].email, 'test@mailinator.com')\n self.assertNotEqual(dto.email_scoring_list[0].scoring.score, 0)\n self.assertEqual(\n dto.email_scoring_list[0].scoring.disposable.is_disposable, True)\n self.assertEqual(\n dto.email_scoring_list[0].scoring.freemail.is_freemail, False)\n self.assertEqual(\n dto.email_scoring_list[0].scoring.address.is_role, False)\n self.assertEqual(\n dto.email_scoring_list[0].scoring.address.is_well_formed, True)\n self.assertEqual(\n dto.email_scoring_list[0].scoring.smtp.exist_address, True)\n\n def testCheckBadBatchEmailConnectionApiKeyJSON(self):\n api_key_sample = TEST_KEY_SAMPLE\n email_sample = ['test@mailinator.com']\n connection = client.Client(api_key=api_key_sample)\n dto = connection.CheckBatchEmail(email_sample)\n self.assertEqual(dto.status_code, requests.codes.ok)\n self.assertEqual(dto.error, None)\n self.assertEqual(\n dto.email_scoring_list[0].email, 'test@mailinator.com')\n self.assertNotEqual(dto.email_scoring_list[0].scoring.score, 0)\n self.assertEqual(\n dto.email_scoring_list[0].scoring.disposable.is_disposable, 
True)\n self.assertEqual(\n dto.email_scoring_list[0].scoring.freemail.is_freemail, False)\n self.assertEqual(\n dto.email_scoring_list[0].scoring.address.is_role, False)\n self.assertEqual(\n dto.email_scoring_list[0].scoring.address.is_well_formed, True)\n self.assertEqual(\n dto.email_scoring_list[0].scoring.smtp.exist_address, True)\n self.assertEqual(1, len(dto.json['response']))\n\n def testCheckBadBatchEmailConnectionWrongApiKey(self):\n api_key_sample = TEST_WRONG_KEY_SAMPLE\n email_sample = ['test@mailinator.com']\n connection = client.Client(api_key=api_key_sample)\n dto = connection.CheckBatchEmail(email_sample)\n self.assertEqual(dto.status_code, requests.codes.bad_request)\n\n def testASIPAddressConnectionApiKey(self):\n api_key_sample = TEST_KEY_SAMPLE\n ip_sample = '8.8.8.8'\n connection = client.Client(api_key=api_key_sample)\n dto = connection.GetASbyIP(ip_sample)\n self.assertEqual(dto.status_code, requests.codes.ok)\n self.assertEqual(dto.error, None)\n self.assertEqual(dto.asystem.name, 'Google LLC')\n self.assertEqual(dto.asystem.asn, '15169')\n\n def testASIPAddressConnectionApiKeyJSON(self):\n api_key_sample = TEST_KEY_SAMPLE\n ip_sample = '8.8.8.8'\n connection = client.Client(api_key=api_key_sample)\n dto = connection.GetASbyIP(ip_sample)\n self.assertEqual(dto.status_code, requests.codes.ok)\n self.assertEqual(dto.error, None)\n self.assertEqual(dto.asystem.name, 'Google LLC')\n self.assertEqual(dto.asystem.asn, '15169')\n self.assertEqual(dto.json['as']['asn'], '15169')\n\n def testASPrivateIPAddressConnectionApiKey(self):\n api_key_sample = TEST_KEY_SAMPLE\n ip_sample = '10.0.0.1'\n connection = client.Client(api_key=api_key_sample)\n dto = connection.GetASbyIP(ip_sample)\n self.assertEqual(dto.status_code, requests.codes.not_found)\n self.assertEqual(dto.asystem, None)\n\n def testASIPAddressConnectionWrongApiKey(self):\n api_key_sample = TEST_WRONG_KEY_SAMPLE\n ip_sample = '8.8.8.8'\n connection = client.Client(api_key=api_key_sample)\n dto = connection.GetASbyIP(ip_sample)\n self.assertEqual(dto.status_code, requests.codes.bad_request)\n\n def testASPrivateIPAddressConnectionWrongApiKey(self):\n api_key_sample = TEST_WRONG_KEY_SAMPLE\n ip_sample = '10.0.0.1'\n connection = client.Client(api_key=api_key_sample)\n dto = connection.GetASbyIP(ip_sample)\n self.assertEqual(dto.status_code, requests.codes.bad_request)\n\n def testASNumConnectionApiKey(self):\n api_key_sample = TEST_KEY_SAMPLE\n asnum_sample = 15169\n connection = client.Client(api_key=api_key_sample)\n dto = connection.GetASbyNum(asnum_sample)\n self.assertEqual(dto.status_code, requests.codes.ok)\n self.assertEqual(dto.error, None)\n self.assertEqual(dto.asystem.name, 'Google LLC')\n self.assertEqual(dto.asystem.asn, '15169')\n\n def testASNumConnectionApiKeyJSON(self):\n api_key_sample = TEST_KEY_SAMPLE\n asnum_sample = 15169\n connection = client.Client(api_key=api_key_sample)\n dto = connection.GetASbyNum(asnum_sample)\n self.assertEqual(dto.status_code, requests.codes.ok)\n self.assertEqual(dto.error, None)\n self.assertEqual(dto.asystem.name, 'Google LLC')\n self.assertEqual(dto.asystem.asn, '15169')\n self.assertEqual(dto.json['as']['asn'], '15169')\n\n def testASNumConnectionWrongApiKey(self):\n api_key_sample = TEST_WRONG_KEY_SAMPLE\n asnum_sample = 15169\n connection = client.Client(api_key=api_key_sample)\n dto = connection.GetASbyNum(asnum_sample)\n self.assertEqual(dto.status_code, requests.codes.bad_request)\n\n def testASBatchIPAddressesConnectionApiKey(self):\n api_key_sample = 
TEST_KEY_SAMPLE\n ip_sample = ['8.8.8.8', '9.9.9.9', '8.8.4.4']\n connection = client.Client(api_key=api_key_sample)\n dto = connection.GetASBatchByIP(ip_sample)\n self.assertEqual(dto.status_code, requests.codes.ok)\n self.assertEqual(dto.error, None)\n self.assertIn(dto.asystem_ip_list[0].asystem.asn, ['15169', '19281'])\n self.assertIn(dto.asystem_ip_list[1].asystem.asn, ['15169', '19281'])\n self.assertIn(dto.asystem_ip_list[2].asystem.asn, ['15169', '19281'])\n\n def testASBatchIPAddressesConnectionApiKeyJSON(self):\n api_key_sample = TEST_KEY_SAMPLE\n ip_sample = ['8.8.8.8', '9.9.9.9', '8.8.4.4']\n connection = client.Client(api_key=api_key_sample)\n dto = connection.GetASBatchByIP(ip_sample)\n self.assertEqual(dto.status_code, requests.codes.ok)\n self.assertEqual(dto.error, None)\n self.assertIn(dto.asystem_ip_list[0].asystem.asn, ['15169', '19281'])\n self.assertIn(dto.asystem_ip_list[1].asystem.asn, ['15169', '19281'])\n self.assertIn(dto.asystem_ip_list[2].asystem.asn, ['15169', '19281'])\n self.assertEqual(3, len(dto.json['response']))\n\n def testASBatchIPAddressesConnectionWrongApiKey(self):\n api_key_sample = TEST_WRONG_KEY_SAMPLE\n ip_sample = ['8.8.8.8', '9.9.9.9', '8.8.4.4']\n connection = client.Client(api_key=api_key_sample)\n dto = connection.GetASBatchByIP(ip_sample)\n self.assertEqual(dto.status_code, requests.codes.bad_request)\n\n def testASBatchNumConnectionApiKey(self):\n api_key_sample = TEST_KEY_SAMPLE\n asn_sample = [15169, 19281]\n connection = client.Client(api_key=api_key_sample)\n dto = connection.GetASBatchByNum(asn_sample)\n self.assertEqual(dto.status_code, requests.codes.ok)\n self.assertEqual(dto.error, None)\n self.assertIn(dto.asystem_asn_list[0].asystem.asn, ['15169', '19281'])\n self.assertIn(dto.asystem_asn_list[1].asystem.asn, ['15169', '19281'])\n\n def testASBatchNumConnectionApiKeyJSON(self):\n api_key_sample = TEST_KEY_SAMPLE\n asn_sample = [15169, 19281]\n connection = client.Client(api_key=api_key_sample)\n dto = connection.GetASBatchByNum(asn_sample)\n self.assertEqual(dto.status_code, requests.codes.ok)\n self.assertEqual(dto.error, None)\n self.assertIn(dto.asystem_asn_list[0].asystem.asn, ['15169', '19281'])\n self.assertIn(dto.asystem_asn_list[1].asystem.asn, ['15169', '19281'])\n self.assertEqual(2, len(dto.json['response']))\n\n def testASBatchNumConnectionWrongApiKey(self):\n api_key_sample = TEST_WRONG_KEY_SAMPLE\n asn_sample = [15169, 19281]\n connection = client.Client(api_key=api_key_sample)\n dto = connection.GetASBatchByNum(asn_sample)\n self.assertEqual(dto.status_code, requests.codes.bad_request)\n\n def testWhoisIPAddressConnectionApiKey(self):\n api_key_sample = TEST_KEY_SAMPLE\n ip_sample = '9.9.9.9'\n connection = client.Client(api_key=api_key_sample)\n dto = connection.GetWhoisIP(ip_sample)\n self.assertEqual(dto.status_code, requests.codes.ok)\n self.assertEqual(dto.error, None)\n self.assertEqual(dto.whois.entities[0], 'CLEAN-97')\n self.assertEqual(dto.whois.asn, '19281')\n\n def testWhoisIPAddressConnectionApiKeyJSON(self):\n api_key_sample = TEST_KEY_SAMPLE\n ip_sample = '9.9.9.9'\n connection = client.Client(api_key=api_key_sample)\n dto = connection.GetWhoisIP(ip_sample)\n self.assertEqual(dto.status_code, requests.codes.ok)\n self.assertEqual(dto.error, None)\n self.assertEqual(dto.whois.entities[0], 'CLEAN-97')\n self.assertEqual(dto.whois.asn, '19281')\n self.assertEqual('19281', dto.json['whois']['asn'])\n\n def testWhoisIPAddressConnectionWrongApiKey(self):\n api_key_sample = TEST_WRONG_KEY_SAMPLE\n 
ip_sample = '9.9.9.9'\n connection = client.Client(api_key=api_key_sample)\n dto = connection.GetWhoisIP(ip_sample)\n self.assertEqual(dto.status_code, requests.codes.bad_request)\n\n def testHistoryIPAddressConnectionApiKey(self):\n api_key_sample = TEST_KEY_SAMPLE\n ip_sample = '1.2.3.4'\n connection = client.Client(api_key=api_key_sample)\n dto = connection.GetHistoryIP(ip_sample)\n self.assertEqual(dto.status_code, requests.codes.ok)\n self.assertEqual(dto.error, None)\n self.assertEqual(dto.history[0].ip, '1.2.3.4')\n self.assertEqual(len(dto.history[0].blacklist_change) > 0, True)\n\n def testHistoryIPAddressConnectionApiKeyJSON(self):\n api_key_sample = TEST_KEY_SAMPLE\n ip_sample = '1.2.3.4'\n connection = client.Client(api_key=api_key_sample)\n dto = connection.GetHistoryIP(ip_sample)\n self.assertEqual(dto.status_code, requests.codes.ok)\n self.assertEqual(dto.error, None)\n self.assertEqual(dto.history[0].ip, '1.2.3.4')\n self.assertEqual(len(dto.history[0].blacklist_change) > 0, True)\n self.assertGreater(len(dto.json['changes_ip']), 0)\n\n def testHistoryDomainConnectionApiKey(self):\n api_key_sample = TEST_KEY_SAMPLE\n domain = 'mailinator.com'\n connection = client.Client(api_key=api_key_sample)\n dto = connection.GetHistoryDomain(domain)\n self.assertEqual(dto.status_code, requests.codes.ok)\n self.assertEqual(dto.error, None)\n self.assertEqual(dto.history[0].domain, 'mailinator.com')\n self.assertEqual(len(dto.history[0].blacklist_change) > 0, True)\n\n def testHistoryDomainConnectionApiKeyJSON(self):\n api_key_sample = TEST_KEY_SAMPLE\n domain = 'mailinator.com'\n connection = client.Client(api_key=api_key_sample)\n dto = connection.GetHistoryDomain(domain)\n self.assertEqual(dto.status_code, requests.codes.ok)\n self.assertEqual(dto.error, None)\n self.assertEqual(dto.history[0].domain, 'mailinator.com')\n self.assertEqual(len(dto.history[0].blacklist_change) > 0, True)\n self.assertGreater(len(dto.json['changes_domain']), 0)\n\n def testHistoryEmailConnectionApiKey(self):\n api_key_sample = TEST_KEY_SAMPLE\n email = 'test@mailinator.com'\n connection = client.Client(api_key=api_key_sample)\n dto = connection.GetHistoryEmail(email)\n self.assertEqual(dto.status_code, requests.codes.ok)\n self.assertEqual(dto.error, None)\n self.assertEqual(dto.history[0].email, 'test@mailinator.com')\n self.assertEqual(len(dto.history[0].blacklist_change) > 0, True)\n\n def testHistoryEmailConnectionApiKeyJSON(self):\n api_key_sample = TEST_KEY_SAMPLE\n email = 'test@mailinator.com'\n connection = client.Client(api_key=api_key_sample)\n dto = connection.GetHistoryEmail(email)\n self.assertEqual(dto.status_code, requests.codes.ok)\n self.assertEqual(dto.error, None)\n self.assertEqual(dto.history[0].email, 'test@mailinator.com')\n self.assertEqual(len(dto.history[0].blacklist_change) > 0, True)\n self.assertGreater(len(dto.json['changes_email']), 0)\n" }, { "alpha_fraction": 0.6647129058837891, "alphanum_fraction": 0.6684565544128418, "avg_line_length": 47.30045700073242, "blob_id": "f6b615b62a2d8b0fd3bf52a59e1cf1b0b2a444cd", "content_id": "db2c1823db56c1d75aa19c0d5b162d8308330e92", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 52889, "license_type": "permissive", "max_line_length": 180, "num_lines": 1095, "path": "/apilityio/model.py", "repo_name": "willbengtson/python-sdk", "src_encoding": "UTF-8", "text": "# Copyright 2017-2018 CAPITAL LAB OU\n#\n# Licensed under the Apache License, Version 2.0 (the 
\"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport requests\n\n\nclass BaseDict(dict):\n \"\"\"Create a Generic object from dict.\n \"\"\"\n\n def __getattr__(self, name):\n if name in self:\n return self[name]\n else:\n raise AttributeError(\"No such attribute: \" + name)\n\n def __setattr__(self, name, value):\n self[name] = value\n\n def __delattr__(self, name):\n if name in self:\n del self[name]\n else:\n raise AttributeError(\"No such attribute: \" + name)\n\n\nclass Response(object):\n \"\"\"Create a basic response object.\n\n Keyword Arguments:\n - ``status_code``: An integer with the HTTP response status code\n - ``error``: If status code is not 200 (OK), the error returned by the server.\n - ``json``: JSON object returned by the REST API without modifications.\n\n Attributes:\n - ``status_code``: An integer with the HTTP response status code\n - ``error``: If status code is not 200 (OK), the error returned by the server.\n - ``json``: JSON object returned by the REST API without modifications.\n \"\"\"\n\n def __init__(self, status_code=requests.codes.ok, error=None, json=None):\n self.status_code = status_code\n self.error = error\n self.json = json\n\n\nclass BadIPResponse(Response):\n \"\"\"Response object with the result of a query to check if the IP address has been found in any blacklist.\n\n Keyword Arguments:\n - ``status_code``: An integer with the HTTP response status code. See https://apility.io/apidocs/#errors\n - ``error``: If status code is not 200 (OK) or 404 (NOT_FOUND), the error returned by the server.\n - ``json``: JSON object returned by the REST API without modifications.\n - ``blacklists``: List of strings with the name of the Blacklists of the IP.\n\n Attributes:\n - ``status_code``: An integer with the HTTP response status code. See https://apility.io/apidocs/#errors\n - ``error``: If status code is not 200 (OK) or 404 (NOT_FOUND), the error returned by the server.\n - ``json``: JSON object returned by the REST API without modifications.\n - ``blacklists``: List of strings with the name of the Blacklists of the IP.\n \"\"\"\n\n def __init__(self, status_code=requests.codes.ok, error=None, blacklists=[], json=None):\n super(BadIPResponse, self).__init__(\n status_code=status_code, error=error, json=json)\n self.blacklists = blacklists\n\n\nclass IPBlacklist(object):\n \"\"\"Object to pair IP adress and blacklists. 
\n\n\nclass IPBlacklist(object):\n \"\"\"Object to pair an IP address and blacklists. This object contains an IP address and the list of blacklists where it was found.\n\n Keyword Arguments:\n - ``ip_address``: the IP address of the pair\n - ``blacklists``: the list of strings with the blacklist names of the IP address\n\n Attributes:\n - ``ip_address``: the IP address of the pair\n - ``blacklists``: the list of strings with the blacklist names of the IP address\n \"\"\"\n\n def __init__(self, ip_address, blacklists):\n self.ip_address = ip_address\n self.blacklists = blacklists\n\n\nclass BadBatchIPResponse(Response):\n \"\"\"Response object with the result of a query to check if a group of IP addresses has been found in any blacklist.\n\n Keyword Arguments:\n - ``status_code``: An integer with the HTTP response status code. See https://apility.io/apidocs/#errors\n - ``error``: If status code is not 200 (OK), the error returned by the server.\n - ``json``: JSON object returned by the REST API without modifications.\n - ``ipblacklists_set``: Set of :func:`~apilityio.model.IPBlacklist` objects that contains the result of the check performed on each IP address.\n\n Attributes:\n - ``status_code``: An integer with the HTTP response status code. See https://apility.io/apidocs/#errors\n - ``error``: If status code is not 200 (OK), the error returned by the server.\n - ``json``: JSON object returned by the REST API without modifications.\n - ``ipblacklists_set``: Set of :func:`~apilityio.model.IPBlacklist` objects that contains the result of the check performed on each IP address.\n \"\"\"\n\n def __init__(self, status_code=requests.codes.ok, error=None, ipblacklists_set=set(), json=None):\n super(BadBatchIPResponse, self).__init__(\n status_code=status_code, error=error, json=json)\n self.ipblacklists_set = ipblacklists_set\n\n\nclass ContinentNames(BaseDict):\n \"\"\"Object to contain all the translations of a continent.\n\n Keyword Arguments:\n - ``continent_names``: Dictionary containing all the names in different languages of a given continent.\n\n Attributes:\n - ``en``: English\n - ``pt-BR``: Portuguese\n - ``fr``: French\n - ``ja``: Japanese\n - ``de``: German\n - ``zh-CN``: Chinese\n - ``es``: Spanish\n - ``ru``: Russian\n \"\"\"\n\n\nclass CountryNames(BaseDict):\n \"\"\"Object to contain all the translations of a country. 
Only the English (en) attribute is guaranteed to exist.\n\n Keyword Arguments:\n - ``country_names``: Dictionary containing all the names in different languages of a given country\n\n Attributes:\n - ``en``: English\n - ``pt-BR``: Portuguese\n - ``fr``: French\n - ``ja``: Japanese\n - ``de``: German\n - ``zh-CN``: Chinese\n - ``es``: Spanish\n - ``ru``: Russian\n \"\"\"\n\n\nclass GeoIP(BaseDict):\n \"\"\"Object to contain all geolocation data of the IP address.\n\n Keyword Arguments:\n - ``geoip``: Dictionary containing all the geolocation data as described in https://apility.io/apidocs/#geoip\n\n Attributes:\n - ``longitude``: Longitude where the IP has been found\n - ``latitude``: Latitude where the IP has been found\n - ``hostname``: Name of the host resolved from the IP\n - ``address``: IPv4 or IPv6 address of the request\n - ``continent``: 2 letter code of the continent.\n - ``country``: ISO 3166-1 Country code.\n - ``region``: Name of the region, by default the English translation in 'region_names'.\n - ``city``: Name of the city, by default the English translation in 'city_names'.\n - ``postal``: Postal code or Zip code\n - ``time_zone``: Time zone of the location\n - ``accuracy_radius``: The approximate radius in kilometers around the latitude and longitude for the geographical entity. -1 if unknown.\n - ``continent_geoname_id``: Id of the continent in the geonames.org database. -1 if the continent cannot be geolocated.\n - ``country_geoname_id``: Id of the country in the geonames.org database. -1 if the country cannot be geolocated.\n - ``region_geoname_id``: Id of the region in the geonames.org database. -1 if the region cannot be geolocated.\n - ``city_geoname_id``: Id of the city in the geonames.org database. -1 if the city cannot be geolocated.\n - ``continent_names``: Object containing the :func:`~apilityio.model.ContinentNames` data.\n - ``country_names``: Object containing the :func:`~apilityio.model.CountryNames` data.\n - ``region_names``: JSON structure containing the different names of the region in different languages. Languages are in ISO 639-1. Empty if region cannot be geolocated.\n - ``city_names``: JSON structure containing the different names of the city in different languages. Languages are in ISO 639-1. Empty if city cannot be geolocated.\n - ``asystem``: Object containing the :func:`~apilityio.model.ASystem` data.\n \"\"\"\n\n def __init__(self, geoip):\n super(GeoIP, self).__init__(geoip)\n self.country_names = CountryNames(geoip['country_names'])\n self.continent_names = ContinentNames(geoip['continent_names'])\n self.asystem = ASystem(geoip['as'])
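\n\n\n# Usage sketch (illustrative): GeoIP subclasses BaseDict, so every key of the\n# payload is reachable both as a key and as an attribute. The payload below is\n# a trimmed, made-up example of a /geoip response.\n#\n# geo = GeoIP({'address': '9.9.9.9', 'country': 'CH',\n# 'country_names': {'en': 'Switzerland'},\n# 'continent_names': {'en': 'Europe'},\n# 'as': {'asn': '19281', 'name': 'QUAD9-AS-1',\n# 'country': 'US', 'networks': []}})\n# assert geo.country == 'CH'\n# assert geo.country_names.en == 'Switzerland'\n# assert geo.asystem.asn == '19281'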
\n\n\nclass ASystem(BaseDict):\n \"\"\"Object to contain all the information of an Autonomous System.\n\n Keyword Arguments:\n - ``asystem``: Dictionary containing all the autonomous system information described in https://apility.io/apidocs/#as\n\n Attributes:\n - ``asn``: AS number\n - ``name``: name of the AS\n - ``country``: ISO 3166-1 Country code\n - ``networks``: List with the networks of the AS\n \"\"\"\n\n\nclass GeoIPResponse(Response):\n \"\"\"Response object with the result of a query to get the IP address geolocation data.\n\n Keyword Arguments:\n - ``status_code``: An integer with the HTTP response status code. See https://apility.io/apidocs/#errors\n - ``error``: If status code is not 200 (OK), the error returned by the server.\n - ``json``: JSON object returned by the REST API without modifications.\n - ``geoip``: Dictionary containing all the geolocation data as described in https://apility.io/apidocs/#geoip\n\n Attributes:\n - ``status_code``: An integer with the HTTP response status code. See https://apility.io/apidocs/#errors\n - ``error``: If status code is not 200 (OK), the error returned by the server.\n - ``json``: JSON object returned by the REST API without modifications.\n - ``geoip``: Object :func:`~apilityio.model.GeoIP` containing all geolocation attributes.\n \"\"\"\n\n def __init__(self, status_code=requests.codes.ok, error=None, geoip=None, json=None):\n super(GeoIPResponse, self).__init__(\n status_code=status_code, error=error, json=json)\n if geoip is not None and 'address' in geoip:\n self.geoip = GeoIP(geoip)\n else:\n self.geoip = None\n\n\nclass IPGeodata(object):\n \"\"\"Object to pair an IP address and geodata information. This object contains an IP address and its geodata information.\n\n Keyword Arguments:\n - ``ip_address``: the IP address of the pair\n - ``geodata``: a :func:`~apilityio.model.GeoIP` object with the geodata information\n\n Attributes:\n - ``ip_address``: the IP address of the pair\n - ``geoip``: a :func:`~apilityio.model.GeoIP` object with the geodata information\n \"\"\"\n\n def __init__(self, ip_address, geodata):\n self.ip_address = ip_address\n self.geoip = geodata\n\n\nclass GeoBatchIPResponse(Response):\n \"\"\"Response object with the result of a query to get the geolocation data of multiple IP addresses.\n\n Keyword Arguments:\n - ``status_code``: An integer with the HTTP response status code\n - ``error``: If status code is not 200 (OK), the error returned by the server.\n - ``json``: JSON object returned by the REST API without modifications.\n - ``geolocated_ip_list``: List of :func:`~apilityio.model.IPGeodata` objects.\n\n Attributes:\n - ``status_code``: An integer with the HTTP response status code\n - ``error``: If status code is not 200 (OK), the error returned by the server.\n - ``json``: JSON object returned by the REST API without modifications.\n - ``geolocated_ip_list``: List of :func:`~apilityio.model.IPGeodata` objects.\n \"\"\"\n\n def __init__(self, status_code=requests.codes.ok, error=None, geolocated_ip_list=[], json=None):\n super(GeoBatchIPResponse, self).__init__(\n status_code=status_code, error=error, json=json)\n self.geolocated_ip_list = geolocated_ip_list\n\n\nclass IP(BaseDict):\n \"\"\"Object to contain the information of looking up the IP in the blacklists.\n\n Keyword Arguments:\n - ``ip_address``: Dictionary containing all the information of the IP in the blacklists as described in https://apility.io/apidocs/#ip-score\n\n Attributes:\n - ``score``: Number describing the result of the algorithm. Negative means 'suspicious' or 'bad' IP. 
Neutral or positive means it's a 'clean' IP.\n - ``blacklist``: List containing the blacklists where the IP was found.\n - ``is_quarantined``: If the IP has been added by the user to the quarantine lists.\n - ``address``: IPv4 or IPv6 resolved.\n \"\"\"\n\n\nclass Domain(BaseDict):\n \"\"\"Object to contain the information of testing different subdomains of the main root domain: NS records, MX records and domain blacklists.\n\n Keyword Arguments:\n - ``domain``: Dictionary containing all the subdomains of the main root domain: NS records, MX records and domain as described in https://apility.io/apidocs/#domainname-score\n\n Attributes:\n - ``score``: Number describing the result of the algorithm. Negative means 'suspicious' or 'bad' domain. Neutral or positive means it's a 'clean' domain.\n - ``blacklist_ns``: List containing the blacklists where the NS domains were found.\n - ``blacklist_mx``: List containing the blacklists where the MX domains were found.\n - ``blacklist``: List containing the blacklists where the domain was found.\n - ``mx``: List with the hosts found in the MX records.\n - ``ns``: List with the hosts found in the NS records.\n \"\"\"\n\n\nclass BadDomain(BaseDict):\n \"\"\"Object to contain all scoring and blacklist analysis for the main Domain, MX and NS records and IP address.\n\n Keyword Arguments:\n - ``domain_data``: Dictionary containing all the domain analysis data as described in https://apility.io/apidocs/#domain\n\n Attributes:\n - ``score``: Number describing the result of the algorithm. Negative means 'suspicious' or 'bad' domain. Neutral or positive means it's a 'clean' domain.\n - ``domain``: Object :func:`~apilityio.model.Domain` containing the 'domainname score' information as a result of the analysis of the domains.\n - ``ip``: Object :func:`~apilityio.model.IP` containing the 'ip score' information as a result of the analysis of the IP of the domain.\n - ``source_ip``: Object :func:`~apilityio.model.IP` containing the 'ip score' information as a result of the analysis of the IP origin of the request.\n \"\"\"\n\n def __init__(self, domain_data):\n super(BadDomain, self).__init__(domain_data)\n self.domain = Domain(domain_data['domain'])\n self.ip = IP(domain_data['ip'])\n self.source_ip = IP(domain_data['source_ip'])\n\n\nclass BadDomainResponse(Response):\n \"\"\"Response object with the result of a query to check if the Domain and its MX and NS records have been found in any blacklist.\n\n Keyword Arguments:\n - ``status_code``: An integer with the HTTP response status code. See https://apility.io/apidocs/#errors\n - ``error``: If status code is not 200 (OK) or 404 (NOT_FOUND), the error returned by the server.\n - ``json``: JSON object returned by the REST API without modifications.\n - ``domain_data``: Dictionary containing all the domain analysis data as described in https://apility.io/apidocs/#domain\n\n Attributes:\n - ``status_code``: An integer with the HTTP response status code. 
See https://apility.io/apidocs/#errors\n - ``error``: If status code is not 200 (OK) or 404 (NOT_FOUND), the error returned by the server.\n - ``json``: JSON object returned by the REST API without modifications.\n - ``response``: Object :func:`~apilityio.model.BadDomain` containing all scoring and blacklists of the Domain.\n \"\"\"\n\n def __init__(self, status_code=requests.codes.ok, error=None, domain_data=None, json=None):\n super(BadDomainResponse, self).__init__(\n status_code=status_code, error=error, json=json)\n if domain_data is not None:\n self.response = BadDomain(domain_data)\n else:\n self.response = None\n\n\nclass DomainScored(object):\n \"\"\"Object to pair a domain and the result of the scoring process.\n\n Keyword Arguments:\n - ``domain``: the domain FQDN of the pair\n - ``scored_domain``: a :func:`~apilityio.model.BadDomain` object with the scoring information\n\n Attributes:\n - ``domain``: the domain FQDN of the pair\n - ``scoring``: a :func:`~apilityio.model.BadDomain` object with the scoring information\n \"\"\"\n\n def __init__(self, domain, scored_domain):\n self.domain = domain\n self.scoring = scored_domain\n\n\nclass BadBatchDomainResponse(Response):\n \"\"\"Response object with the result of a query to get the analysis data of multiple domains.\n\n Keyword Arguments:\n - ``status_code``: An integer with the HTTP response status code\n - ``error``: If status code is not 200 (OK), the error returned by the server.\n - ``json``: JSON object returned by the REST API without modifications.\n - ``domain_scoring_list``: List of :func:`~apilityio.model.DomainScored` objects.\n\n Attributes:\n - ``status_code``: An integer with the HTTP response status code\n - ``error``: If status code is not 200 (OK), the error returned by the server.\n - ``json``: JSON object returned by the REST API without modifications.\n - ``domain_scoring_list``: List of :func:`~apilityio.model.DomainScored` objects.\n \"\"\"\n\n def __init__(self, status_code=requests.codes.ok, error=None, domain_scoring_list=[], json=None):\n super(BadBatchDomainResponse, self).__init__(\n status_code=status_code, error=error, json=json)\n self.domain_scoring_list = domain_scoring_list
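\n\n\n# Usage sketch (illustrative): the client call and the attribute names below\n# are taken from this repository's test suite (tests/client_test.py); the API\n# key is a placeholder.\n#\n# import apilityio.client as client\n# conn = client.Client(api_key='YOUR-API-KEY')\n# dto = conn.CheckBatchDomain(['loketa.com', 'mailinator.com'])\n# if dto.status_code == requests.codes.ok:\n# for pair in dto.domain_scoring_list:\n# print(pair.domain, pair.scoring.score)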
\n\n\nclass EmailAddress(BaseDict):\n \"\"\"Object to contain the information of the format of the Email address.\n\n Keyword Arguments:\n - ``address_score``: Dictionary containing all the address format details described in https://apility.io/apidocs/#address-score\n\n Attributes:\n - ``score``: Number describing the result of the algorithm. Negative means 'suspicious' or 'bad' Email. Neutral or positive means it's a 'clean' Email.\n - ``is_role``: True if the email has the format of a role-based address. It's not common to allow registration with role-based addresses.\n - ``is_well_formed``: True if the email is compliant with the standard email formats.\n \"\"\"\n\n\nclass SMTPInfo(BaseDict):\n \"\"\"Object to contain the information obtained after testing the remote inbox SMTP server where the email is hosted.\n\n Keyword Arguments:\n - ``smtp_score``: Dictionary containing all SMTP score test details in https://apility.io/apidocs/#smtp-score\n\n Attributes:\n - ``score``: Number describing the result of the algorithm. Negative means 'suspicious' or 'bad' Email. Neutral or positive means it's a 'clean' Email.\n - ``exist_mx``: True if the SMTP service is reachable using the hosts in the MX records.\n - ``exist_address``: True if the SMTP service recognizes the email address.\n - ``exist_catchall``: True if the SMTP service implements a catch-all email feature.\n \"\"\"\n\n\nclass FreeEmail(BaseDict):\n \"\"\"Object to contain the information of checking the domain against a list of Free Email domain servers.\n\n Keyword Arguments:\n - ``freemail_score``: Dictionary containing all Freemail score test details in https://apility.io/apidocs/#freemail-score\n\n Attributes:\n - ``score``: Number describing the result of the algorithm. Negative means 'suspicious' or 'bad' Email. Neutral or positive means it's a 'clean' Email.\n - ``is_freemail``: True if the domain has been found in any Free Email Service Provider list.\n \"\"\"\n\n\nclass EmailScore(BaseDict):\n \"\"\"Object to contain the information of checking the email against a list of Email addresses of abusers.\n\n Keyword Arguments:\n - ``email_score``: Dictionary containing all Email of abusers score test details in https://apility.io/apidocs/#email-score\n\n Attributes:\n - ``score``: Number describing the result of the algorithm. Negative means 'suspicious' or 'bad' Email. Neutral or positive means it's a 'clean' Email.\n - ``blacklist``: List containing the blacklists where the email was found.\n \"\"\"\n\n\nclass DisposableEmail(BaseDict):\n \"\"\"Object to contain the information of checking the domain against a list of Disposable Email Address providers.\n\n Keyword Arguments:\n - ``disposable_score``: Dictionary containing all Disposable score test details in https://apility.io/apidocs/#disposable-score\n\n Attributes:\n - ``score``: Number describing the result of the algorithm. Negative means 'suspicious' or 'bad' Email. Neutral or positive means it's a 'clean' Email.\n - ``is_disposable``: True if the domain has been found in any Disposable Email Address Providers list.\n \"\"\"\n\n\nclass BadEmail(BaseDict):\n \"\"\"Object to contain all scoring and blacklist analysis for an Email: SMTP server, main Domain, MX and NS records and IP address.\n\n Keyword Arguments:\n - ``email_data``: Dictionary containing all the email analysis data as described in https://apility.io/apidocs/#email\n\n Attributes:\n - ``score``: Number describing the result of the algorithm. Negative means 'suspicious' or 'bad' email. 
Neutral or positive means it's a 'clean' email.\n - ``domain``: Object :func:`~apilityio.model.Domain` containing the 'domainname score' information as a result of the analysis of the domains.\n - ``ip``: Object :func:`~apilityio.model.IP` containing the 'ip score' information as a result of the analysis of the IP of the domain.\n - ``source_ip``: Object :func:`~apilityio.model.IP` containing the 'ip score' information as a result of the analysis of the IP origin of the request.\n - ``address``: Object :func:`~apilityio.model.EmailAddress` containing the 'address score' object as a result of the analysis of the email.\n - ``smtp``: Object :func:`~apilityio.model.SMTPInfo` containing the 'smtp score' object as a result of the analysis of the email service.\n - ``freemail``: Object :func:`~apilityio.model.FreeEmail` containing the 'freemail score' object as a result of the analysis of the email provider.\n - ``email``: Object :func:`~apilityio.model.EmailScore` containing the 'email-blacklist score' object as a result of the lookup in the email blacklists.\n - ``disposable``: Object :func:`~apilityio.model.DisposableEmail` containing the 'disposable score' object as a result of the analysis of the email provider.\n \"\"\"\n\n def __init__(self, email_data):\n super(BadEmail, self).__init__(email_data)\n self.domain = Domain(email_data['domain'])\n self.ip = IP(email_data['ip'])\n self.source_ip = IP(email_data['source_ip'])\n self.address = EmailAddress(email_data['address'])\n self.smtp = SMTPInfo(email_data['smtp'])\n self.freemail = FreeEmail(email_data['freemail'])\n self.email = EmailScore(email_data['email'])\n self.disposable = DisposableEmail(email_data['disposable'])\n\n\nclass BadEmailResponse(Response):\n \"\"\"Response object with the result of a query to check if the Email, its domain and the domain's MX and NS records have been found in any blacklist.\n\n Keyword Arguments:\n - ``status_code``: An integer with the HTTP response status code. See https://apility.io/apidocs/#errors\n - ``error``: If status code is not 200 (OK) or 404 (NOT_FOUND), the error returned by the server.\n - ``json``: JSON object returned by the REST API without modifications.\n - ``email_data``: Dictionary containing all the email analysis data as described in https://apility.io/apidocs/#email\n\n Attributes:\n - ``status_code``: An integer with the HTTP response status code. 
See https://apility.io/apidocs/#errors\n - ``error``: If status code is not 200 (OK) or 404 (NOT_FOUND), the error returned by the server.\n - ``json``: JSON object returned by the REST API without modifications.\n - ``response``: Object :func:`~apilityio.model.BadEmail` containing all scoring and blacklists of the Email.\n \"\"\"\n\n def __init__(self, status_code=requests.codes.ok, error=None, email_data=None, json=None):\n super(BadEmailResponse, self).__init__(\n status_code=status_code, error=error, json=json)\n if email_data is not None:\n self.response = BadEmail(email_data)\n else:\n self.response = None\n\n\nclass EmailScored(object):\n \"\"\"Object to pair an Email and the result of the scoring process.\n\n Keyword Arguments:\n - ``email``: the email address of the pair\n - ``scored_email``: a :func:`~apilityio.model.BadEmail` object with the scoring information\n\n Attributes:\n - ``email``: the email address of the pair\n - ``scoring``: a :func:`~apilityio.model.BadEmail` object with the scoring information\n \"\"\"\n\n def __init__(self, email, scored_email):\n self.email = email\n self.scoring = scored_email\n\n\nclass BadBatchEmailResponse(Response):\n \"\"\"Response object with the result of a query to get the analysis data of multiple emails.\n\n Keyword Arguments:\n - ``status_code``: An integer with the HTTP response status code\n - ``error``: If status code is not 200 (OK), the error returned by the server.\n - ``json``: JSON object returned by the REST API without modifications.\n - ``email_scoring_list``: List of :func:`~apilityio.model.EmailScored` objects.\n\n Attributes:\n - ``status_code``: An integer with the HTTP response status code\n - ``error``: If status code is not 200 (OK), the error returned by the server.\n - ``json``: JSON object returned by the REST API without modifications.\n - ``email_scoring_list``: List of :func:`~apilityio.model.EmailScored` objects.\n \"\"\"\n\n def __init__(self, status_code=requests.codes.ok, error=None, email_scoring_list=[], json=None):\n super(BadBatchEmailResponse, self).__init__(\n status_code=status_code, error=error, json=json)\n self.email_scoring_list = email_scoring_list
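\n\n\n# Usage sketch (illustrative), with names taken from the test suite; the API\n# key is a placeholder.\n#\n# import apilityio.client as client\n# conn = client.Client(api_key='YOUR-API-KEY')\n# dto = conn.CheckEmail('test@mailinator.com')\n# if dto.status_code == requests.codes.ok:\n# print(dto.response.score,\n# dto.response.disposable.is_disposable,\n# dto.response.smtp.exist_address)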
\n\n\nclass ASResponse(Response):\n \"\"\"Response object with the result of a query to get Autonomous System information.\n\n Keyword Arguments:\n - ``status_code``: An integer with the HTTP response status code. See https://apility.io/apidocs/#errors\n - ``error``: If status code is not 200 (OK), the error returned by the server.\n - ``json``: JSON object returned by the REST API without modifications.\n - ``asystem``: Dictionary containing all the autonomous system information described in https://apility.io/apidocs/#as\n\n Attributes:\n - ``status_code``: An integer with the HTTP response status code. See https://apility.io/apidocs/#errors\n - ``error``: If status code is not 200 (OK), the error returned by the server.\n - ``json``: JSON object returned by the REST API without modifications.\n - ``asystem``: Object :func:`~apilityio.model.ASystem` containing all autonomous system attributes.\n \"\"\"\n\n def __init__(self, status_code=requests.codes.ok, error=None, asystem=None, json=None):\n super(ASResponse, self).__init__(\n status_code=status_code, error=error, json=json)\n if asystem is not None:\n self.asystem = ASystem(asystem)\n else:\n self.asystem = None\n\n\nclass ASBatchIPResponse(Response):\n \"\"\"Response object with the result of a query to get the Autonomous System information of multiple IP addresses.\n\n Keyword Arguments:\n - ``status_code``: An integer with the HTTP response status code\n - ``error``: If status code is not 200 (OK), the error returned by the server.\n - ``json``: JSON object returned by the REST API without modifications.\n - ``asystem_ip_list``: List of :func:`~apilityio.model.IPASystem` objects.\n\n Attributes:\n - ``status_code``: An integer with the HTTP response status code\n - ``error``: If status code is not 200 (OK), the error returned by the server.\n - ``json``: JSON object returned by the REST API without modifications.\n - ``asystem_ip_list``: List of :func:`~apilityio.model.IPASystem` objects.\n \"\"\"\n\n def __init__(self, status_code=requests.codes.ok, error=None, asystem_ip_list=[], json=None):\n super(ASBatchIPResponse, self).__init__(\n status_code=status_code, error=error, json=json)\n self.asystem_ip_list = asystem_ip_list\n\n\nclass IPASystem(object):\n \"\"\"Object to pair an IP address and Autonomous System information. This object contains an IP address and its AS information.\n\n Keyword Arguments:\n - ``ip_address``: the IP address of the pair\n - ``as_data``: a :func:`~apilityio.model.ASystem` object with the autonomous system information\n\n Attributes:\n - ``ip_address``: the IP address of the pair\n - ``asystem``: a :func:`~apilityio.model.ASystem` object with the autonomous system information\n \"\"\"\n\n def __init__(self, ip_address, as_data):\n self.ip_address = ip_address\n self.asystem = as_data
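\n\n\n# Usage sketch (illustrative), with names taken from the test suite. 'conn' is\n# a client.Client instance as in the sketches above.\n#\n# dto = conn.GetASbyIP('8.8.8.8')\n# if dto.status_code == requests.codes.ok:\n# print(dto.asystem.name, dto.asystem.asn) # e.g. 'Google LLC', '15169'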
\n\n\nclass ASNASystem(object):\n \"\"\"Object to pair AS numbers and Autonomous System information. This object contains an AS number and its AS information.\n\n Keyword Arguments:\n - ``as_number``: the AS number of the object.\n - ``as_data``: a :func:`~apilityio.model.ASystem` object with the autonomous system information\n\n Attributes:\n - ``asn``: the AS number of the object.\n - ``asystem``: a :func:`~apilityio.model.ASystem` object with the autonomous system information\n \"\"\"\n\n def __init__(self, as_number, as_data):\n self.asn = as_number\n self.asystem = as_data\n\n\nclass ASBatchNumResponse(Response):\n \"\"\"Response object with the result of a query to get the Autonomous System information of multiple AS numbers.\n\n Keyword Arguments:\n - ``status_code``: An integer with the HTTP response status code\n - ``error``: If status code is not 200 (OK), the error returned by the server.\n - ``json``: JSON object returned by the REST API without modifications.\n - ``asystem_asn_list``: List of :func:`~apilityio.model.ASNASystem` objects.\n\n Attributes:\n - ``status_code``: An integer with the HTTP response status code\n - ``error``: If status code is not 200 (OK), the error returned by the server.\n - ``json``: JSON object returned by the REST API without modifications.\n - ``asystem_asn_list``: List of :func:`~apilityio.model.ASNASystem` objects.\n \"\"\"\n\n def __init__(self, status_code=requests.codes.ok, error=None, asystem_num_list=[], json=None):\n super(ASBatchNumResponse, self).__init__(\n status_code=status_code, error=error, json=json)\n self.asystem_asn_list = asystem_num_list\n\n\nclass WhoisIPResponse(Response):\n \"\"\"Response object with the result of a query to get WHOIS information of an IP address.\n\n Keyword Arguments:\n - ``status_code``: An integer with the HTTP response status code. See https://apility.io/apidocs/#errors\n - ``error``: If status code is not 200 (OK), the error returned by the server.\n - ``json``: JSON object returned by the REST API without modifications.\n - ``whois``: Dict structure with the WHOIS IP information described in https://apility.io/apidocs/#whois\n\n Attributes:\n - ``status_code``: An integer with the HTTP response status code. See https://apility.io/apidocs/#errors\n - ``error``: If status code is not 200 (OK), the error returned by the server.\n - ``json``: JSON object returned by the REST API without modifications.\n - ``whois``: Object :func:`~apilityio.model.WhoisIP` containing all WHOIS objects and attributes.\n \"\"\"\n\n def __init__(self, status_code=requests.codes.ok, error=None, whois=None, json=None):\n super(WhoisIPResponse, self).__init__(\n status_code=status_code, error=error, json=json)\n if whois is not None:\n self.whois = WhoisIP(whois)\n else:\n self.whois = None\n\n\nclass WhoisIP(BaseDict):\n \"\"\"Object to contain all WHOIS data of the IP address.\n\n Keyword Arguments:\n - ``whois``: Dictionary containing all the WHOIS data as described in https://apility.io/apidocs/#whois\n\n Attributes:\n - ``query``: The IP address\n - ``asn``: Globally unique identifier used for routing information exchange with Autonomous Systems.\n - ``asn_cidr``: Network routing block assigned to an ASN.\n - ``asn_country_code``: ASN assigned country code in ISO 3166-1 format.\n - ``asn_date``: ASN allocation date in ISO 8601 format.\n - ``asn_registry``: ASN assigned regional internet registry.\n - ``asn_description``: The ASN description\n - ``network``: Object containing the :func:`~apilityio.model.WhoisNetwork` data.\n - ``entities``: list of object names referenced by an RIR network. 
Map these to the objects' keys.\n - ``objects``: List of objects containing the :func:`~apilityio.model.WhoisObject` data.\n \"\"\"\n\n def __init__(self, whois):\n super(WhoisIP, self).__init__(whois)\n self.network = WhoisNetwork(whois['network'])\n self.objects = [WhoisObject(value)\n for key, value in whois['objects'].items()]\n\n\nclass WhoisObject(BaseDict):\n \"\"\"Object to contain all WHOIS data (entity) in the objects list within the WHOIS.\n\n Keyword Arguments:\n - ``whoisobject``: Dictionary containing all the WHOIS OBJECT data as described in https://apility.io/apidocs/#whois-object\n\n Attributes:\n - ``contact``: Object containing the :func:`~apilityio.model.WhoisObjectContact` data. Contact information registered with an RIR object.\n - ``entities``: List of object names referenced by an RIR object. Map these to other objects' keys.\n - ``events``: List of objects containing the :func:`~apilityio.model.WhoisEvent` data. List of event dictionaries.\n - ``events_actor``: List of objects containing the :func:`~apilityio.model.WhoisEvent` data as events (no actor).\n - ``handle``: Unique identifier for a registered object.\n - ``links``: List of HTTP/HTTPS links provided for an RIR object.\n - ``notices``: List of objects containing the :func:`~apilityio.model.WhoisNotice` data. List of notice dictionaries.\n - ``remarks``: List of objects containing the :func:`~apilityio.model.WhoisRemark` data. List of remark (notice) dictionaries.\n - ``roles``: List of roles assigned to a registered object.\n - ``status``: List indicating the state of a registered object.\n \"\"\"\n\n def __init__(self, whoisobject):\n super(WhoisObject, self).__init__(whoisobject)\n self.contact = WhoisObjectContact(whoisobject['contact'])\n events = whoisobject['events']\n if events:\n self.events = [WhoisEvent(event) for event in events]\n else:\n self.events = []\n notices = whoisobject['notices']\n if notices:\n self.notices = [WhoisNotice(notice) for notice in notices]\n else:\n self.notices = []\n remarks = whoisobject['remarks']\n if remarks:\n self.remarks = [WhoisRemark(remark) for remark in remarks]\n else:\n self.remarks = []\n\n\nclass WhoisNetwork(BaseDict):\n \"\"\"Object to contain all WHOIS data (entity) in the network within the WHOIS.\n\n Keyword Arguments:\n - ``whoisnetwork``: Dictionary containing all the WHOIS NETWORK data as described in https://apility.io/apidocs/#whois-network\n\n Attributes:\n - ``cidr``: Network routing block an IP address belongs to.\n - ``country``: Country code registered with the RIR in ISO 3166-1 format.\n - ``end_address``: The last IP address in a network block.\n - ``events``: List of objects containing the :func:`~apilityio.model.WhoisEvent` data. List of event dictionaries.\n - ``handle``: Unique identifier for a registered object.\n - ``ip_version``: IP protocol version (v4 or v6) of an IP address.\n - ``links``: HTTP/HTTPS links provided for an RIR object.\n - ``name``: The identifier assigned to the network registration for an IP address.\n - ``notices``: List of objects containing the :func:`~apilityio.model.WhoisNotice` data. List of notice dictionaries.\n - ``parent_handle``: Unique identifier for the parent network of a registered network.\n - ``remarks``: List of objects containing the :func:`~apilityio.model.WhoisRemark` data. 
List of remark (notice) dictionaries.\n - ``start_address``: The first IP address in a network block.\n - ``status``: List indicating the state of a registered object.\n - ``type``: The RIR classification of a registered network.\n \"\"\"\n\n def __init__(self, whoisnetwork):\n super(WhoisNetwork, self).__init__(whoisnetwork)\n events = whoisnetwork['events']\n if events:\n self.events = [WhoisEvent(event) for event in events]\n else:\n self.events = []\n notices = whoisnetwork['notices']\n if notices:\n self.notices = [WhoisNotice(notice) for notice in notices]\n else:\n self.notices = []\n remarks = whoisnetwork['remarks']\n if remarks:\n self.remarks = [WhoisRemark(remark) for remark in remarks]\n else:\n self.remarks = []\n\n\nclass WhoisEvent(BaseDict):\n \"\"\"Object to contain all WHOIS data (entity) in the events within the WHOIS.\n\n Keyword Arguments:\n - ``event``: Dictionary containing all the WHOIS EVENT data as described in https://apility.io/apidocs/#whois-event\n\n Attributes:\n - ``action``: The reason for an event.\n - ``timestamp``: The date an event occurred in ISO 8601 format.\n - ``actor``: The identifier for an event initiator (if any).\n \"\"\"\n\n\nclass WhoisNotice(BaseDict):\n \"\"\"Object to contain all WHOIS data (entity) in the notices within the WHOIS.\n\n Keyword Arguments:\n - ``notice``: Dictionary containing all the WHOIS NOTICE data as described in https://apility.io/apidocs/#whois-notice\n\n Attributes:\n - ``title``: The title/header for a notice.\n - ``description``: The description/body of a notice.\n - ``links``: list of HTTP/HTTPS links provided for a notice.\n \"\"\"\n\n\nclass WhoisRemark(BaseDict):\n \"\"\"Object to contain all WHOIS data (entity) in the remarks within the WHOIS.\n\n Keyword Arguments:\n - ``remark``: Dictionary containing all the WHOIS REMARK data as described in https://apility.io/apidocs/#whois-notice\n\n Attributes:\n - ``title``: The title/header for a remark.\n - ``description``: The description/body of a remark.\n - ``links``: list of HTTP/HTTPS links provided for a remark.\n \"\"\"\n\n\nclass WhoisObjectContact(BaseDict):\n \"\"\"Object to contain all WHOIS data (entity) in the object contact within the WHOIS.\n\n Keyword Arguments:\n - ``object_contact``: Dictionary containing all the WHOIS OBJECT CONTACT data as described in https://apility.io/apidocs/#whois-object-contact\n\n Attributes:\n - ``address``: List of contact postal address dictionaries. Contains key type and value.\n - ``email``: List of contact email address dictionaries. Contains key type and value.\n - ``kind``: The contact information kind (individual, group, org).\n - ``name``: The contact name.\n - ``phone``: List of contact phone number dictionaries. Contains key type and value.\n - ``role``: The contact's role.\n - ``title``: The contact's position or job title.\n \"\"\"
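\n\n\n# Usage sketch (illustrative), with names taken from the test suite. 'conn' is\n# a client.Client instance as in the sketches above.\n#\n# dto = conn.GetWhoisIP('9.9.9.9')\n# if dto.status_code == requests.codes.ok:\n# print(dto.whois.asn, dto.whois.network.cidr, dto.whois.entities)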
\n\n\nclass HistoryIPResponse(Response):\n \"\"\"Response object with the result of a query to get the historical information of an IP address.\n\n Keyword Arguments:\n - ``status_code``: An integer with the HTTP response status code. See https://apility.io/apidocs/#errors\n - ``error``: If status code is not 200 (OK), the error returned by the server.\n - ``json``: JSON object returned by the REST API without modifications.\n - ``history``: Dict structure with the list of IP transactions described in https://apility.io/apidocs/#transaction-ip\n\n Attributes:\n - ``status_code``: An integer with the HTTP response status code. See https://apility.io/apidocs/#errors\n - ``error``: If status code is not 200 (OK), the error returned by the server.\n - ``json``: JSON object returned by the REST API without modifications.\n - ``history``: List of Objects :func:`~apilityio.model.HistoryIP` containing all transaction historical data.\n \"\"\"\n\n def __init__(self, status_code=requests.codes.ok, error=None, history=None, json=None):\n super(HistoryIPResponse, self).__init__(\n status_code=status_code, error=error, json=json)\n self.history = []\n if history:\n for item in history:\n self.history.append(HistoryIP(item))\n\n\nclass HistoryIP(BaseDict):\n \"\"\"Object to contain the details of a transaction of an IP address in our database.\n\n Keyword Arguments:\n - ``transaction_ip``: Dictionary containing all the transaction IP address details as described in https://apility.io/apidocs/#transaction-ip\n\n Attributes:\n - ``timestamp``: The UNIX time in seconds when the transaction was performed.\n - ``command``: 'add' or 'rem'. Type of transaction in the database: ADD to the blacklist or REMove from the blacklist.\n - ``ip``: IP address of the transaction\n - ``blacklist_change``: Blacklist added or removed by the transaction.\n - ``blacklists``: List of blacklists after the execution of the command and the blacklist change.\n \"\"\"\n\n\nclass HistoryDomainResponse(Response):\n \"\"\"Response object with the result of a query to get the historical information of a domain.\n\n Keyword Arguments:\n - ``status_code``: An integer with the HTTP response status code. See https://apility.io/apidocs/#errors\n - ``error``: If status code is not 200 (OK), the error returned by the server.\n - ``json``: JSON object returned by the REST API without modifications.\n - ``history``: Dict structure with the list of domain transactions described in https://apility.io/apidocs/#transaction-domain\n\n Attributes:\n - ``status_code``: An integer with the HTTP response status code. See https://apility.io/apidocs/#errors\n - ``error``: If status code is not 200 (OK), the error returned by the server.\n - ``json``: JSON object returned by the REST API without modifications.\n - ``history``: List of Objects :func:`~apilityio.model.HistoryDomain` containing all transaction historical data.\n \"\"\"\n\n def __init__(self, status_code=requests.codes.ok, error=None, history=None, json=None):\n super(HistoryDomainResponse, self).__init__(\n status_code=status_code, error=error, json=json)\n self.history = []\n if history:\n for item in history:\n self.history.append(HistoryDomain(item))\n\n\nclass HistoryDomain(BaseDict):\n \"\"\"Object to contain the details of a transaction of a domain in our database.\n\n Keyword Arguments:\n - ``transaction_domain``: Dictionary containing all the transaction domain details as described in https://apility.io/apidocs/#transaction-domain\n\n Attributes:\n - ``timestamp``: The UNIX time in seconds when the transaction was performed.\n - ``command``: 'add' or 'rem'. Type of transaction in the database: ADD to the blacklist or REMove from the blacklist.\n - ``domain``: Domain of the transaction\n - ``blacklist_change``: Blacklist added or removed by the transaction.\n - ``blacklists``: List of blacklists after the execution of the command and the blacklist change.\n \"\"\"\n\n\nclass HistoryEmailResponse(Response):\n \"\"\"Response object with the result of a query to get the historical information of an email.\n\n Keyword Arguments:\n - ``status_code``: An integer with the HTTP response status code. 
See https://apility.io/apidocs/#errors\n - ``error``: If status code is not 200 (OK), the error returned by the server.\n - ``json``: JSON object returned by the REST API without modifications.\n - ``history``: Dict structure with the list of email transactions described in https://apility.io/apidocs/#transaction-email\n\n Attributes:\n - ``status_code``: An integer with the HTTP response status code. See https://apility.io/apidocs/#errors\n - ``error``: If status code is not 200 (OK), the error returned by the server.\n - ``json``: JSON object returned by the REST API without modifications.\n - ``history``: List of Objects :func:`~apilityio.model.HistoryEmail` containing all transaction historical data.\n \"\"\"\n\n def __init__(self, status_code=requests.codes.ok, error=None, history=None, json=None):\n super(HistoryEmailResponse, self).__init__(\n status_code=status_code, error=error, json=json)\n self.history = []\n if history:\n for item in history:\n self.history.append(HistoryEmail(item))\n\n\nclass HistoryEmail(BaseDict):\n \"\"\"Object to contain the details of a transaction of an email in our database.\n\n Keyword Arguments:\n - ``transaction_email``: Dictionary containing all the transaction email details as described in https://apility.io/apidocs/#transaction-email\n\n Attributes:\n - ``timestamp``: The UNIX time in seconds when the transaction was performed.\n - ``command``: 'add' or 'rem'. Type of transaction in the database: ADD to the blacklist or REMove from the blacklist.\n - ``email``: Email of the transaction\n - ``blacklist_change``: Blacklist added or removed by the transaction.\n - ``blacklists``: List of blacklists after the execution of the command and the blacklist change.\n \"\"\"
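\n\n\n# Usage sketch (illustrative), with names taken from the test suite. The same\n# pattern applies to GetHistoryDomain and GetHistoryEmail. 'conn' is a\n# client.Client instance as in the sketches above.\n#\n# dto = conn.GetHistoryIP('1.2.3.4')\n# if dto.status_code == requests.codes.ok:\n# for tx in dto.history:\n# print(tx.timestamp, tx.command, tx.blacklist_change)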
\n\n\nclass QuarantineIPResponse(Response):\n \"\"\"Response object with the result of a query to get the IP addresses in the quarantine of the user.\n\n Keyword Arguments:\n - ``status_code``: An integer with the HTTP response status code. See https://apility.io/apidocs/#errors\n - ``error``: If status code is not 200 (OK), the error returned by the server.\n - ``json``: JSON object returned by the REST API without modifications.\n - ``quarantine``: Dict structure with the list of pairs of IP addresses and TTL to stay in the quarantine\n\n Attributes:\n - ``status_code``: An integer with the HTTP response status code. See https://apility.io/apidocs/#errors\n - ``error``: If status code is not 200 (OK), the error returned by the server.\n - ``json``: JSON object returned by the REST API without modifications.\n - ``quarantine``: List of Objects :func:`~apilityio.model.QuarantineIP` containing the pair IP address and TTL.\n \"\"\"\n\n def __init__(self, status_code=requests.codes.ok, error=None, quarantine=None, json=None):\n super(QuarantineIPResponse, self).__init__(\n status_code=status_code, error=error, json=json)\n self.quarantine = []\n if quarantine:\n for item in quarantine:\n self.quarantine.append(QuarantineIP(item))\n\n\nclass QuarantineIP(BaseDict):\n \"\"\"Object to contain the IP address and the Time to Live of the IP address in the quarantine list.\n\n Keyword Arguments:\n - ``quarantine_ip``: Dictionary containing the IP address and the TTL.\n\n Attributes:\n - ``ip``: IP address to add to the QUARANTINE-IP blacklist.\n - ``ttl``: Time to Live in seconds of the IP in the blacklist.\n \"\"\"
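\n\n\n# Usage sketch (illustrative): QuarantineIP is a plain BaseDict wrapper, so it\n# can be built directly from an ip/ttl pair.\n#\n# q = QuarantineIP({'ip': '10.0.0.1', 'ttl': 3600})\n# assert q.ip == '10.0.0.1' and q.ttl == 3600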
See https://apility.io/apidocs/#errors\n        - ``error``: If status code is not 200 (OK) or the error returned by the server.\n        - ``json``: JSON object returned by the REST API without modifications.\n        - ``quarantine``: List of Objects :func:`~apilityio.model.QuarantineContinent` containing the pair Continent and TTL.\n    \"\"\"\n\n    def __init__(self, status_code=requests.codes.ok, error=None, quarantine=None, json=None):\n        super(QuarantineContinentResponse, self).__init__(\n            status_code=status_code, error=error, json=json)\n        self.quarantine = []\n        if quarantine:\n            for item in quarantine:\n                self.quarantine.append(QuarantineContinent(item))\n\n\nclass QuarantineContinent(BaseDict):\n    \"\"\"Object to contain the Continent and the Time to Live of the continent in the quarantine list.\n\n    Keyword Arguments:\n        - ``quarantine_continent``: Dictionary containing the Continent and the TTL.\n\n    Attributes:\n        - ``continent``: Continent to add to QUARANTINE-CONTINENT blacklist.\n        - ``ttl``: Time to Live in seconds of the continent in the blacklist.\n    \"\"\"\n\n\nclass QuarantineASResponse(Response):\n    \"\"\"Response object with the result of a query to get the Autonomous Systems in the quarantine of the user.\n\n    Keyword Arguments:\n        - ``status_code``: An integer with the HTTP response status code. See https://apility.io/apidocs/#errors\n        - ``error``: If status code is not 200 (OK) or the error returned by the server.\n        - ``json``: JSON object returned by the REST API without modifications.\n        - ``quarantine``: Dict structure with the list of pairs of AS and TTL to stay in the quarantine\n\n    Attributes:\n        - ``status_code``: An integer with the HTTP response status code. See https://apility.io/apidocs/#errors\n        - ``error``: If status code is not 200 (OK) or the error returned by the server.\n        - ``json``: JSON object returned by the REST API without modifications.\n        - ``quarantine``: List of Objects :func:`~apilityio.model.QuarantineAS` containing the pair AS and TTL.\n    \"\"\"\n\n    def __init__(self, status_code=requests.codes.ok, error=None, quarantine=None, json=None):\n        super(QuarantineASResponse, self).__init__(\n            status_code=status_code, error=error, json=json)\n        self.quarantine = []\n        if quarantine:\n            for item in quarantine:\n                self.quarantine.append(QuarantineAS(item))\n\n\nclass QuarantineAS(BaseDict):\n    \"\"\"Object to contain the AS and the Time to Live of the AS in the quarantine list.\n\n    Keyword Arguments:\n        - ``quarantine_as``: Dictionary containing the AS and the TTL.\n\n    Attributes:\n        - ``asn``: Autonomous System number to add to QUARANTINE-AS blacklist.\n        - ``ttl``: Time to Live in seconds of the AS in the blacklist.\n    \"\"\"\n" }, { "alpha_fraction": 0.7246537208557129, "alphanum_fraction": 0.821052610874176, "avg_line_length": 57.225807189941406, "blob_id": "be69059d8890937284d8b3ffa919132474f8552d", "content_id": "e683f7e32a520d0c0e3c93b5d274dc54020228c2", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1805, "license_type": "permissive", "max_line_length": 140, "num_lines": 31, "path": "/CHANGELOG.md", "repo_name": "willbengtson/python-sdk", "src_encoding": "UTF-8", "text": "6a83fe8 (HEAD -> dev) Added new 'json' property to all Reponse objects with the raw JSON response from the API.\n97d0433 (origin/master, origin/dev, origin/HEAD, master) Formatted code with autopep8 and passed pylint. 
Still warning about lines too long.\n0279706 (tag: v0.0.2) Added reference to Read the Docs and badges\n9a6c127 (tag: v0.0.1) Changelog updated\nf7e543e Latest readme.md but still missing the refernce to ReadTheDocs\nf17da6d Added quickstart and cross references\na332443 Add examples of basic usage of the API library\n1950bb6 New readme documentation (in progress)\nb4a9aca Sphinx documentation\n54f2498 Added extensive documentation to the public methods and attributes\n43d1e5d Added py-ipaddress 2.7 dependency only for Python 2.7\n2da7ee5 Added py-ipaddress 2.7 dependency Using time.time() for backwards compatibility with 2.7\n031a5db Fixed broken imports\na2a3e53 Imports for backwards compatibility with 2.7\n5bef2ea Lists from Dict for backwards compatibility with 2.7\n5627c2a Imports for backwards compatibility with 2.7\n08d3a8e Enable builds for Python 2.7, 3.4, 3.5 and 3.6\ne1754fb Added more delays\n983d527 Disable debug verbosity\ne34efad Add delays to allow remote databases synchornization\nb2f8a49 Changed assert on ttl to less than because travis is slow. Enabled debug logs\n023904e Removed conflict char for 2.7 builds\nd2b2b8c Test build only for 3.6\n46d502a Removed Makefile not used. Refactored LICENSE to LICENSE.txt\n0117d38 Refactored anonymous and api-key based tests to run safely in CI environments like Travis\nc2269bd Removed non utf8 compliant code\nbb9d4f7 Simplify build and test process: using setup.py and unittest\n83b583b Comments on how to use the APILITYIO_API_KEY\n8c3e14b Sample documents to test CI\n3189636 First commit of python API library\n1662746 Initial commit\n" }, { "alpha_fraction": 0.6161616444587708, "alphanum_fraction": 0.6161616444587708, "avg_line_length": 29.461538314819336, "blob_id": "f29b8bc6f9456451c46b452f4a7c00fdd932e9af", "content_id": "c20e1343a40d07f3c3f5e6366385ee2c9c748d02", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 396, "license_type": "permissive", "max_line_length": 72, "num_lines": 13, "path": "/apility-runner.py", "repo_name": "willbengtson/python-sdk", "src_encoding": "UTF-8", "text": "\"\"\"Convenience wrapper for running apility directly from source tree.\"\"\"\n\nimport unittest\nimport xmlrunner\n\nif __name__ == '__main__':\n with open('test-results.xml', 'wb') as output:\n unittest.main(\n module='tests.client_test',\n testRunner=xmlrunner.XMLTestRunner(output=output),\n failfast=False,\n buffer=False,\n catchbreak=False)\n" }, { "alpha_fraction": 0.6490618586540222, "alphanum_fraction": 0.6733843088150024, "avg_line_length": 31.704545974731445, "blob_id": "02628728e7947ef9a87a9669c2e8e171b7fa3ab9", "content_id": "5973a1a713246fd15f5ac1370f40485440ee10c8", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2878, "license_type": "permissive", "max_line_length": 115, "num_lines": 88, "path": "/setup.py", "repo_name": "willbengtson/python-sdk", "src_encoding": "UTF-8", "text": "\"\"\"\n Copyright 2018 CAPITAL LAB OU\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific 
language governing permissions and\n  limitations under the License.\n\"\"\"\n\nimport os\nimport re\nimport sys\n\n# Always prefer setuptools over distutils\nfrom setuptools import setup, find_packages\n# To use a consistent encoding\nfrom codecs import open\nfrom os import path\n\nPACKAGES = ['apilityio', 'tests']\n\nDEPENDENCIES = ['requests>=2.0.0,<3.0.0', 'validators>=0.12.2,<1.0.0', 'py2-ipaddress<=3.4.1;python_version<\"3.4\"']\n\n# Note: Breaking change introduced in pyfakefs 3.3.\nTEST_DEPENDENCIES = ['mock>=2.0.0,<3.0.0', 'pyfakefs>=3.2,<3.3',\n                     'six>=1.11.0,<2.0.0', 'validators>=0.12.2,<1.0.0',\n                     'unittest-xml-reporting']\n\nCLASSIFIERS = [\n    'Development Status :: 3 - Alpha',\n    'Intended Audience :: Developers',\n    'Intended Audience :: Science/Research',\n    'License :: OSI Approved :: Apache Software License',\n    'Programming Language :: Python :: 2.7',\n    'Programming Language :: Python :: 3.4',\n    'Programming Language :: Python :: 3.5',\n    'Programming Language :: Python :: 3.6',\n    'Topic :: Security',\n    'Topic :: Software Development :: Libraries :: Python Modules'\n]\n\nhere = path.abspath(path.dirname(__file__))\n\n# Get the long description from the README file\nwith open(path.join(here, 'README.md'), encoding='utf-8') as f:\n    long_description = f.read()\n\ndef GetVersion():\n    \"\"\"Gets the version from apilityio/common.py.\n\n    We can't import this directly because new users would get ImportErrors on our\n    third party dependencies.\n\n    Returns:\n      The version of the library.\n    \"\"\"\n    with open(os.path.join('apilityio', 'common.py')) as versions_file:\n        source = versions_file.read()\n    return re.search('\\\\nVERSION = \\'(.*?)\\'', source).group(1)\n\n\nextra_params = {}\nif sys.version_info[0] == 3:\n    extra_params['use_2to3'] = True\n\nsetup(name='apilityio-lib',\n      version=GetVersion(),\n      description='Apility.io Python Client Library',\n      author='Apility.io Devops Team',\n      author_email='devops@apility.io',\n      url='https://github.com/Apilityio/python-lib',\n      license='Apache License 2.0',\n      long_description=long_description,\n      packages=PACKAGES,\n      platforms='any',\n      keywords='apilityio apility abuse malicious',\n      classifiers=CLASSIFIERS,\n      install_requires=DEPENDENCIES,\n      tests_require=TEST_DEPENDENCIES,\n      test_suite='tests',\n      **extra_params)\n" }, { "alpha_fraction": 0.7423208355903625, "alphanum_fraction": 0.7448805570602417, "avg_line_length": 40.62711715698242, "blob_id": "db61404b66a2b095baf18e72d387d3e21c45c247", "content_id": "d8ffdd1362092b1a9d7c686d0b8c92762a26c0f9", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2458, "license_type": "permissive", "max_line_length": 244, "num_lines": 59, "path": "/README.md", "repo_name": "willbengtson/python-sdk", "src_encoding": "UTF-8", "text": "Apility.io Python Client Library\n================================\n\n[![Documentation Status](https://readthedocs.org/projects/apilityio-python-lib/badge/?version=latest)](https://apilityio-python-lib.readthedocs.io/en/latest/?badge=latest)\n[![Build Status](https://travis-ci.org/Apilityio/python-lib.svg?branch=dev)](https://travis-ci.org/Apilityio/python-lib)\n\n\nIntroduction\n------------\n\nApility.io can be defined as Threat Intelligence SaaS for developers and product companies that want to know in realtime if their existing or potential users have been classified as 'abusers' by one or more of these lists.\n\nAutomatic extraction processes extract all the information in realtime, keeping the most up to date 
data available, saving you the hassle of regularly extracting and updating all these lists and their data.\n\n\nWhat does Apility.io offer?\n---------------------------\n\nApility.io offers an extremely simple and minimalistic API to access these lists in realtime and ask the following simple question about the resource:\n\nIs this IP, domain or email stored in any blacklist?\n\nThe answers to this question can be:\n\n* YES: The resource can be found in an abusers' list. This is a bad resource.\n* NO: The resource cannot be found in any abusers' list. This is a clean resource.\n\nA bad resource implies some kind of action from developers' side. A clean resource does not need any action from their side.\n\n\nSupported Python Versions\n-------------------------\n\nThis library is supported for Python 2 and 3, for versions 2.7+ and 3.4+ respectively. It is recommended that Python 2 users use Python 2.7.9+ to take advantage of the SSL Certificate Validation feature that is not included in earlier versions.\n\nInstallation\n------------\n\nYou can install the Apility.io Python Client Library with _pip_:\n\n```\n\n $ sudo pip install apilityio-lib\n\n```\n\nAPI Documentation\n-----------------\nYou can read the Python documentation here: http://apilityio-python-lib.readthedocs.io/en/latest/\n\nYou can also read the original REST API specification here: https://apility.io/apidocs\n\nExamples\n--------\nIf you would like to obtain example code for any of the included client libraries, you can find it in our **examples** folder.\n\nContact Us\n----------\nDo you have an issue using the Apilityio Client Libraries? Or perhaps some feedback for how we can improve them? Feel free to let us know on our [issue tracker](https://github.com/Apilityio/python-lib/issues).\n\n\n" }, { "alpha_fraction": 0.6176470518112183, "alphanum_fraction": 0.6176470518112183, "avg_line_length": 12.800000190734863, "blob_id": "fe98b5ce6753719bb12bd4896351c34664909644", "content_id": "a75241b7855c1ca737b1afde18e9ee09c8847e75", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 68, "license_type": "permissive", "max_line_length": 32, "num_lines": 5, "path": "/docs/exceptions.rst", "repo_name": "willbengtson/python-sdk", "src_encoding": "UTF-8", "text": "Exceptions\n==========\n\n.. automodule:: apilityio.errors\n   :members:" }, { "alpha_fraction": 0.7081565260887146, "alphanum_fraction": 0.7142857313156128, "avg_line_length": 31.630769729614258, "blob_id": "41b9a26209ca323f98e65572cb966e138f7c2c49", "content_id": "7a5759ccaca1f8c9fe780b795bad65ce580af34f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 2121, "license_type": "permissive", "max_line_length": 230, "num_lines": 65, "path": "/docs/index.rst", "repo_name": "willbengtson/python-sdk", "src_encoding": "UTF-8", "text": ".. apilityio-lib documentation master file, created by\n   sphinx-quickstart on Wed Jul 18 19:51:44 2018.\n   You can adapt this file completely to your liking, but it should at least\n   contain the root `toctree` directive.\n\nWelcome to Apilityio Python Library's documentation!\n====================================================\n\n.. 
toctree::\n   :maxdepth: 2\n   :caption: Contents:\n\n   installation\n   quickstart\n   apireference\n   responseobjects\n   exceptions\n\nIndices and tables\n==================\n\n* :ref:`genindex`\n* :ref:`modindex`\n* :ref:`search`\n\n\nIntroduction\n------------\n\nhttps://Apility.io can be defined as Threat Intelligence SaaS for developers and product companies that want to know in realtime if their existing or potential users have been classified as 'abusers' by one or more of these lists.\n\nAutomatic extraction processes extract all the information in realtime, keeping the most up to date data available, saving you the hassle of regularly extracting and updating all these lists and their data.\n\n\nWhat does Apility.io offer?\n---------------------------\n\nApility.io offers an extremely simple and minimalistic API to access these lists in realtime and ask the following simple question about the resource:\n\nIs this IP, domain or email stored in any blacklist?\n\nThe answers to this question can be:\n\n* YES: The resource can be found in an abusers' list. This is a bad resource.\n* NO: The resource cannot be found in any abusers' list. This is a clean resource.\n\nA bad resource implies some kind of action from developers' side. A clean resource does not need any action from their side.\n\nHow to install\n--------------\nYou can read the installation instructions in :ref:`installation`.\n\nAPI Documentation\n-----------------\nYou can read the Python documentation in :ref:`apireference`.\n\nQuickstart\n----------\nHow to start quickly with the Python API: :ref:`quickstart`.\n\n\n\nContact Us\n----------\nDo you have an issue using the Apilityio Client Libraries? Or perhaps some feedback for how we can improve them? Feel free to let us know on our issue tracker https://github.com/Apilityio/python-lib/issues.\n" }, { "alpha_fraction": 0.48515161871910095, "alphanum_fraction": 0.4882775843143463, "avg_line_length": 35.352272033691406, "blob_id": "e9b3c2fdb44eb9d1b8df0d1b3c8e0f1e0f0e0f0e", "content_id": "ae99d4f8bfaf2dd904553b0133b136d30ab01716", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3199, "license_type": "permissive", "max_line_length": 78, "num_lines": 88, "path": "/examples/geoip.py", "repo_name": "willbengtson/python-sdk", "src_encoding": "UTF-8", "text": "import sys\nimport getopt\nimport os\nimport traceback\n\nimport apilityio\nimport apilityio.errors\n\n\ndef main(argv=None):\n    if argv is None:\n        argv = sys.argv\n    try:\n        try:\n            api_key = None\n            ip = None\n            options, remainder = getopt.getopt(\n                argv[1:], 'h:a:i', ['help', 'api_key=', 'ip='])\n            for opt, arg in options:\n                if opt in ('-a', '--api_key'):\n                    api_key = arg\n                if opt in ('-i', '--ip'):\n                    try:\n                        ip = unicode(arg, \"utf-8\")\n                    except:\n                        ip = arg\n                elif opt in ('-h', '--help'):\n                    print(\"python geoip.py --api_key=<API_KEY> --ip=<IP>\")\n                    return 0\n        except getopt.error as msg:\n            raise Exception(msg)\n\n        try:\n            client = apilityio.Client(api_key=api_key)\n            api_key, protocol, host = client.GetConnectionData()\n\n            print('Host: %s' % host)\n            print('Protocol: %s' % protocol)\n            print('API Key: %s' % api_key)\n\n            print('Geolocate IP: %s' % ip)\n\n            response = client.GetGeoIP(ip)\n            if response.status_code != 200:\n                print(\"The API call returned this error HTTP %s: %s\" %\n                      (response.status_code, response.error))\n                return 0\n\n            geoip = response.geoip\n            print('+- Accuracy radius: %s' % geoip.accuracy_radius)\n            print('+- Address: %s' % geoip.address)\n            print('+- City: %s' % 
geoip.city)\n print('+- City Geoname ID: %s' % geoip.city_geoname_id)\n print('+- City Names: %s' % geoip.city_names)\n print('+- Continent: %s' % geoip.continent)\n print('+- Continent Geo Name ID: %s' % geoip.continent_geoname_id)\n print('+- Continent Names: %s' % geoip.continent_names)\n print('+- Country: %s' % geoip.country)\n print('+- Country Geo Name ID: %s' % geoip.country_geoname_id)\n print('+- Country Names: %s' % geoip.country_names)\n print('+- Hostname: %s' % geoip.hostname)\n print('+- Latitude: %s' % geoip.latitude)\n print('+- Longitude: %s' % geoip.longitude)\n print('+- Postal code: %s' % geoip.postal)\n print('+- Region: %s' % geoip.region)\n print('+- Region Geoname ID: %s' % geoip.region_geoname_id)\n print('+- Region Names: %s' % geoip.region_names)\n print('+- Time Zone: %s' % geoip.time_zone)\n print('+--- AS number: %s' % geoip.asystem.asn)\n print('+--- AS name: %s' % geoip.asystem.name)\n print('+--- AS country: %s' % geoip.asystem.country)\n print('+--- AS networks: %s' % geoip.asystem.networks)\n\n except apilityio.errors.ApilityioValueError as ae:\n traceback.print_exc()\n print(\"ERROR: \", ae)\n return 2\n\n return 0\n except Exception as e:\n traceback.print_exc()\n print(\"ERROR: \", e)\n print(\"For help, use --help\")\n return 2\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n" }, { "alpha_fraction": 0.49151965975761414, "alphanum_fraction": 0.494911789894104, "avg_line_length": 34.095237731933594, "blob_id": "e8f25e2779fa858cc4d2ba7a78b11182fa10d680", "content_id": "3032dede09eec9e41549b5bcd996822b4c0b5424", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2948, "license_type": "permissive", "max_line_length": 86, "num_lines": 84, "path": "/examples/baddomain.py", "repo_name": "willbengtson/python-sdk", "src_encoding": "UTF-8", "text": "import sys\nimport getopt\nimport os\nimport traceback\n\nimport apilityio\nimport apilityio.errors\n\n\ndef main(argv=None):\n if argv is None:\n argv = sys.argv\n try:\n try:\n api_key = None\n domain = None\n options, remainder = getopt.getopt(\n argv[1:], 'h:a:d', ['help', 'api_key=', 'domain='])\n for opt, arg in options:\n if opt in ('-a', '--api_key'):\n api_key = arg\n if opt in ('-d', '--domain'):\n try:\n domain = unicode(arg, \"utf-8\")\n except:\n domain = arg\n elif opt in ('-h', '--help'):\n print(\"python baddomain.py --api_key=<API_KEY> --domain=<DOMAIN>\")\n return 0\n except getopt.error as msg:\n raise Exception(msg)\n\n try:\n client = apilityio.Client(api_key=api_key)\n api_key, protocol, host = client.GetConnectionData()\n\n print('Host: %s' % host)\n print('Protocol: %s' % protocol)\n print('API Key: %s' % api_key)\n\n print('BadDomain FQDN: %s' % domain)\n\n response = client.CheckDomain(domain)\n\n if response.status_code != 200:\n print(\"The API call returned this error HTTP %s: %s\" %\n (response.status_code, response.error))\n return 0\n\n dresponse = response.response\n print('+- Global score: %s' % dresponse.score)\n print('+--- Domain score: %s' % dresponse.domain.score)\n print('+--- Blacklist: %s' % dresponse.domain.blacklist)\n print('+--- Blacklist NS: %s' % dresponse.domain.blacklist_ns)\n print('+--- Blacklist MX: %s' % dresponse.domain.blacklist_mx)\n print('+--- NS: %s' % dresponse.domain.ns)\n print('+--- MX: %s' % dresponse.domain.mx)\n print('+')\n print('+--- IP score: %s' % dresponse.ip.score)\n print('+--- Blacklist: %s' % dresponse.ip.blacklist)\n print('+--- Quarantined? 
%s' % dresponse.ip.is_quarantined)\n print('+--- Address: %s' % dresponse.ip.address)\n print('+')\n print('+--- Source IP score: %s' % dresponse.source_ip.score)\n print('+--- Source Blacklist: %s' % dresponse.source_ip.blacklist)\n print('+--- Source Quarantined? %s' %\n dresponse.source_ip.is_quarantined)\n print('+--- Source Address: %s' % dresponse.source_ip.address)\n\n except apilityio.errors.ApilityioValueError as ae:\n traceback.print_exc()\n print(\"ERROR: \", ae)\n return 2\n\n return 0\n except Exception as e:\n traceback.print_exc()\n print(\"ERROR: \", e)\n print(\"For help, use --help\")\n return 2\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n" }, { "alpha_fraction": 0.4678252637386322, "alphanum_fraction": 0.4744011163711548, "avg_line_length": 28.56944465637207, "blob_id": "17a6532602d43c761e641a3ee8ad714f6f3b9224", "content_id": "4aaac93e1c8d1f49573525bb9191d267612b91af", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2129, "license_type": "permissive", "max_line_length": 91, "num_lines": 72, "path": "/examples/badip.py", "repo_name": "willbengtson/python-sdk", "src_encoding": "UTF-8", "text": "import sys\nimport getopt\nimport os\nimport traceback\n\nimport apilityio\nimport apilityio.errors\n\n\ndef main(argv=None):\n if argv is None:\n argv = sys.argv\n try:\n try:\n api_key = None\n ip = None\n options, remainder = getopt.getopt(\n argv[1:], 'h:a:i', ['help', 'api_key=', 'ip='])\n for opt, arg in options:\n if opt in ('-a', '--api_key'):\n api_key = arg\n if opt in ('-i', '--ip'):\n try:\n ip = unicode(arg, \"utf-8\")\n except:\n ip = arg\n elif opt in ('-h', '--help'):\n print(\"python badip.py --api_key=<API_KEY> --ip=<IP>\")\n return 0\n except getopt.error as msg:\n raise Exception(msg)\n\n try:\n client = apilityio.Client(api_key=api_key)\n api_key, protocol, host = client.GetConnectionData()\n\n print('Host: %s' % host)\n print('Protocol: %s' % protocol)\n print('API Key: %s' % api_key)\n\n print('Badip IP: %s' % ip)\n\n response = client.CheckIP(ip)\n\n if response.status_code == 404:\n print(\n \"Congratulations! The IP address has not been found in any blacklist.\")\n return 0\n if response.status_code != 200:\n print(\"The API call returned this error HTTP %s: %s\" %\n (response.status_code, response.error))\n return 0\n\n blacklists = response.blacklists\n print(\"Ooops! The IP address has been found in one or more blacklist.\")\n print('+- Blacklists: %s' % blacklists)\n\n except apilityio.errors.ApilityioValueError as ae:\n traceback.print_exc()\n print(\"ERROR: \", ae)\n return 2\n\n return 0\n except Exception as e:\n traceback.print_exc()\n print(\"ERROR: \", e)\n print(\"For help, use --help\")\n return 2\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n" }, { "alpha_fraction": 0.574999988079071, "alphanum_fraction": 0.574999988079071, "avg_line_length": 15, "blob_id": "562ef2d6239a66cc6f0952058b500a66293a2f77", "content_id": "a4e2bbb9e17337635455fc308229e0db3bad75ec", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 80, "license_type": "permissive", "max_line_length": 31, "num_lines": 5, "path": "/docs/responseobjects.rst", "repo_name": "willbengtson/python-sdk", "src_encoding": "UTF-8", "text": "Response Objects\n================\n\n.. automodule:: apilityio.model\n :members:\n" } ]
17
Warrot/GoPro
https://github.com/Warrot/GoPro
5d48bfc4304573aeb5c61f36f5eceb64ea7ea689
bffb91aa8e0f461dfb0c74c2e22ba390104f0a6f
79f01ee033c16d50db5432c9df009973e262a6dc
refs/heads/master
2020-03-27T00:17:13.089090
2018-08-21T19:56:47
2018-08-21T19:56:47
145,610,359
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7648648619651794, "alphanum_fraction": 0.7810810804367065, "avg_line_length": 22.15625, "blob_id": "81c8facf6738b26c76ed4bb3282b0cabf328a8e1", "content_id": "c351d9e9ae1dc0257d252a4beb2af16b9c845cf3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 740, "license_type": "no_license", "max_line_length": 76, "num_lines": 32, "path": "/CamTest.py", "repo_name": "Warrot/GoPro", "src_encoding": "UTF-8", "text": "#All changed must be made in the pi version\n#This is a test module\n\nfrom goprocam import GoProCamera\nfrom goprocam import constants\nimport time\n\n#Initialize an instance of cam\ngpCam = GoProCamera.GoPro(constants.auth)\ntime.sleep(4)\n\n#Set mode to single photo\ngpCam.mode(constants.Mode.PhotoMode)\ntime.sleep(2)\n\n#Take 3 photos with 10 sec interval to prevent a bird blocking the view etc.\nfor i in range(3):\n\tgpCam.downloadLastMedia(gpCam.take_photo())\n\ttime.sleep(10)\n\n#Turn off\ngpCam.power_off()\n\n\n\"\"\"\nTip:\nChanging network on mac is done like this in the command line:\nnetworksetup -setairportnetwork en0 _WiFiNAME_ _Password_\nex: networksetup -setairportnetwork en0 LosAngeles-LAX Rabarbersuppe85\nDer er en fucking typo i wifinavnet\n\n\"\"\"" }, { "alpha_fraction": 0.7660818696022034, "alphanum_fraction": 0.7690058350563049, "avg_line_length": 27.41666603088379, "blob_id": "5dcdf932ab6d46e509d6082efd66fb1459da7fe0", "content_id": "d538da6bb03ea41116548bcd1d92d57656ea1703", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 342, "license_type": "no_license", "max_line_length": 127, "num_lines": 12, "path": "/README.md", "repo_name": "Warrot/GoPro", "src_encoding": "UTF-8", "text": "# GoPro\n\nThis projects involves trying to make my GoPro HERO 3+, to: \n\tconnect to my raspberry pi via wifi\n\tturn on at sunrise/sunset\n\ttake three photos\n\tupload them to my pi\n\tturn off\n\n\nCurrent problems involve:\n\tThe pi is not able to connect to the AdHoc network created by the GoPro, while still being connected to internet via ethernet.\n\t" }, { "alpha_fraction": 0.6995994448661804, "alphanum_fraction": 0.7062750458717346, "avg_line_length": 33, "blob_id": "2f0bbc8dab7dc100a199b1e0a3ff463c8edfe080", "content_id": "c1f6e1a573cf3759fac7580498ee0450e0b2514b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 749, "license_type": "no_license", "max_line_length": 123, "num_lines": 22, "path": "/File.py", "repo_name": "Warrot/GoPro", "src_encoding": "UTF-8", "text": "#All changed must be made in the pi version\n\n\nimport glob\nimport os\nimport time\n\n#Loop over the amount of pictures we want to move\nfor i in range(3,0,-1):\n\t#Fetch newest file\n\tlist_of_files = glob.iglob('/Users/Nicolai/Documents/Python/GoPro/*.JPG') # * means all if need specific format then *.csv\n\tlatest_file = max(list_of_files, key=os.path.getctime)\n\n\t#Create new name for the file\n\tnewName = time.strftime(\"%Y_%m_%d_%H_%M_\"+str(i))+\".JPG\"\n\t\n\t#Check whether morning or evening\n\tif int(time.strftime('%H')) > 12:\n\t\t#Change name and path of the file\n\t\tos.rename(latest_file, '/Users/Nicolai/Documents/Python/GoPro/Evening/'+str(i)+'/'+newName)\n\telse:\n\t\tos.rename(latest_file, '/Users/Nicolai/Documents/Python/GoPro/Morning/'+str(i)+'/'+newName)\n\n" } ]
3
CthulhuDen/Factorials
https://github.com/CthulhuDen/Factorials
fd3b2abf35200f554ed96617f1c871923baf5bcf
0a22356a1d51e2e07fbd96ee0f4e5e45dc1eca17
31ca217049aa37912c942972d66829c2b81829b5
refs/heads/master
2021-01-19T04:52:25.742980
2013-03-21T19:47:29
2013-03-21T19:47:29
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5756374001502991, "alphanum_fraction": 0.6079320311546326, "avg_line_length": 17.010204315185547, "blob_id": "b3af87f624d510b84daf522f35ee1e0c0feaac26", "content_id": "b6c5efa1934353b1d48588d289aaed8c53cce3ed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1765, "license_type": "no_license", "max_line_length": 154, "num_lines": 98, "path": "/factorials.py", "repo_name": "CthulhuDen/Factorials", "src_encoding": "UTF-8", "text": "#! /usr/bin/env python\n\nimport slaves\n\ndef fact(n):\n\trez = 1\n\tif n > 1:\n\t\tfor i in range(2,n+1):\n\t\t\trez = rez * i\n\treturn rez\n\ndef fact_part(st_fin):\n\tst,fin = st_fin\n\trez = 1\n\tfor i in range(st,fin+1):\n\t\trez = rez * i\n\treturn rez\n\ndef fact_double(n):\n\tmid = n/2\n\ts = slaves.slaves(fact_part)\n\ts.put((1,mid))\n\ts.put((mid+1,n))\n\ts.start()\n\trez = s.get()*s.get()\n\ts.terminate()\n\treturn rez\n\ndef prod(x):\n\ta,b = x\n\treturn a*b\n\ndef fact_double_perf(n, max_blocks=15,min_size=500, spec_reduce=reduce):\n\ts = slaves.slaves(fact_part)\n\tdist = n/max_blocks\n\tif dist < min_size:\n\t\tdist = min_size\n\tst = 1\n\tfin = st + dist - 1\n\tcount = 1\n\twhile fin<n:\n\t\ts.put((st,fin,))\n\t\tcount = count + 1\n\t\tst = fin + 1\n\t\tfin = st + dist -1\n\ts.start()\n\ts.put((st,n))\n\tlst = []\n\tfor i in range(0,count):\n\t\tlst.append(s.get())\n\ts.terminate()\n\treturn spec_reduce(lambda x,y:x*y,lst,1)\n\ndef quickreduce(func,lst,start=None,slavs=None):\n\tif slavs is None:\n\t\ts = slaves.slaves(prod)\n\t\ts.start()\n\t\ts.pause()\n\telse:\n\t\ts = slavs\n\tif start is None:\n\t\told = []\n\telse:\n\t\told = [start,]\n\told.extend(lst)\n\tarr = [old,[],]\n\tp = 0\n\tln = len(arr[p])\n\twhile ln>1:\n\t\tprev = arr[p]\n\t\tp = 1 - p\n\t\tarr[p] = []\n\t\tnext = arr[p]\n\t\tcnt = 0\n\t\tfor i in range(0,ln/2):\n\t\t\ts.put((prev[2*i],prev[2*i+1],))\n\t\t\tcnt = cnt + 1\n\t\tif ln%2==1:\n\t\t\tnext.append(prev[-1])\n\t\ts.resume()\n\t\tfor i in range(0,cnt):\n\t\t\tnext.append(s.get())\n\t\ts.pause()\n\t\tln = len(next)\n\ts.terminate()\n\treturn arr[p][0]\n\nfact_double_perf_spec_reduce = lambda n,max_blocks=126,min_size=500: fact_double_perf(n=n,max_blocks=max_blocks,min_size=min_size,spec_reduce=quickreduce)\n\ndef factorial(n):\n\tif n<5000:\n\t\treturn fact(n)\n\telif n<10000:\n\t\treturn fact_double_perf(n)\n\telif n<14000:\n\t\treturn fact_double(n)\n\telse:\n\t\treturn fact_double_perf_spec_reduce(n)\n" }, { "alpha_fraction": 0.7433379888534546, "alphanum_fraction": 0.7489480972290039, "avg_line_length": 36.52631759643555, "blob_id": "35de54a3b7d527825be41389d3f598d88dd6f703", "content_id": "6a10a4aa17e10a46ffbaed7929975b3ac9cc75e3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 713, "license_type": "no_license", "max_line_length": 124, "num_lines": 19, "path": "/README.md", "repo_name": "CthulhuDen/Factorials", "src_encoding": "UTF-8", "text": "Factorials\n==========\n\nModule with quick multiprocess implementations of factorial function\n\nFor example, on my core 2 duo standard math.factorial takes 15 secs and factorial function from this module less then 1 sec.\n\n==========\n\nAuthor(s): Denis <cthulhu> Yuzhanin (CthulhuDen@gmail.com)\n\nUsage notes:\n\tSimply import factorials file from this project, than you can call factorials.factorial\n\n\tThere is supplied time tester in repository\n\ttest.py [num [out [hash]]]\n\t\tnum - Number to calculate factorial of\n\t\tout - 
Either \"number\" to print outputs, \"hash\" to print hashes or \"noans\" to skip printing output\n\t\thash - (Only available with 'hash' option) string specifying hash method (must be one of those supplied in hashlib)\n" }, { "alpha_fraction": 0.7073981761932373, "alphanum_fraction": 0.7090606689453125, "avg_line_length": 22.076923370361328, "blob_id": "cdf12b47f0f410c7b724d70ccc0db058bf651d95", "content_id": "d284bfe229b9a75785118d7d302734b416470279", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1203, "license_type": "no_license", "max_line_length": 148, "num_lines": 52, "path": "/slaves.py", "repo_name": "CthulhuDen/Factorials", "src_encoding": "UTF-8", "text": "#! /usr/bin/env python\n\nimport multiprocessing\n\ndef _func(function,operating,queueIn,queueOut,terminator,lock):\n\tcont = True\n\twhile not(terminator.is_set()):\n\t\toperating.wait()\n\t\tlock.acquire()\n\t\tif queueIn.empty():\n\t\t\tlock.release()\n\t\telse:\n\t\t\tinp = queueIn.get()\n\t\t\tlock.release()\n\t\t\trez = function(inp)\n\t\t\tqueueOut.put(rez)\n\tqueueOut.close()\n\treturn 0\n\nclass slaves:\n\t\n\tdef put(self,info):\n\t\tself.queueIn.put(info)\n\n\tdef get(self):\n\t\treturn self.queueOut.get()\n\n\tdef terminate(self):\n\t\tself.operating.set()\n\t\tself.terminator.set()\n\t\tself.queueIn.close()\n\t\t\n\tdef pause(self):\n\t\tself.operating.clear()\n\n\tdef resume(self):\n\t\tself.operating.set()\n\n\tdef __init__(self,function,number=multiprocessing.cpu_count()):\n\t\tself.queueIn = multiprocessing.Queue()\n\t\tself.queueOut = multiprocessing.Queue()\n\t\tself.terminator = multiprocessing.Event()\n\t\tself.operating = multiprocessing.Event()\n\t\tself.lock = multiprocessing.Lock()\n\t\tself.processes = []\n\t\tfor i in range(0,number):\n\t\t\tself.processes.append(multiprocessing.Process(target=_func,args=(function,self.operating,self.queueIn,self.queueOut,self.terminator,self.lock,)))\n\n\tdef start(self):\n\t\tself.operating.set()\n\t\tfor p in self.processes:\n\t\t\tp.start()\n\n\t\n" }, { "alpha_fraction": 0.6607416272163391, "alphanum_fraction": 0.6699356436729431, "avg_line_length": 31.959596633911133, "blob_id": "ed7790b8692fac8103f476816a04ef27bfbdbe9e", "content_id": "eaaeb080eaed587d06bfd165af5665360be8a822", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3263, "license_type": "no_license", "max_line_length": 123, "num_lines": 99, "path": "/test.py", "repo_name": "CthulhuDen/Factorials", "src_encoding": "UTF-8", "text": "#! 
/usr/bin/env python\n\nimport sys, time, math, hashlib, factorials\n\nalllist = []\nfuncs = dict()\nfuncdiscr = dict()\n\nfuncdiscr[\"timetest\"] = \"arg, f_list, info, rezs, hash - test functions\"\ndef timetest(num, f_list=\"ALL\", info=True, rezs = True, hash = False, hashway = None):\n\tif (type(f_list)==type(\"string\")) and (f_list.upper()==\"ALL\"):\n\t\tf_list = alllist\n\tfor func in f_list:\n\t\tstring = \"Testing function \"+func+\" with \"+str(num)\n\t\tif info:\n\t\t\tstring = string+\" (\"+funcdiscr[func]+\")\"\n\t\tprint string + \":\"\n\t\ttm = time.time()\n\t\trez = funcs[func](num)\n\t\ttm = time.time() - tm\n\t\tprint \"\\tSpent \"+str(round(1000*tm)*.001)+\" secs\"\n\t\tprint \"\\tAnswer's length is \"+str(rez.bit_length())+\" bits\"\n\t\tif rezs:\n\t\t\trez = str(rez)\n\t\t\tif hash:\n\t\t\t\timport hashlib\n\t\t\t\tif hashway is None:\n\t\t\t\t\trez = hashlib.md5(rez).hexdigest()\n\t\t\t\t\tprint \"\\tOutput hash: \"+ rez\n\t\t\t\telif isinstance(hashway,str):\n\t\t\t\t\ttry:\n\t\t\t\t\t\trez = hashlib.new(hashway,rez).hexdigest()\n\t\t\t\t\t\tprint \"\\tOutput hash: \"+ rez\n\n\t\t\t\t\texcept:\n\t\t\t\t\t\tprint \"\\tSpecified hash type is not supported!\"\n\t\t\t\telse:\n\t\t\t\t\ttry:\n\t\t\t\t\t\trez = hashway(rez)\n\t\t\t\t\t\tprint \"\\tOutput hash: \"+ rez\n\t\t\t\t\texcept:\n\t\t\t\t\t\tprint \"\\tHash function supported failed!\"\n\t\t\telse:\n\t\t\t\tprint \"\\tOutput: \"+rez\nfuncs[\"timetest\"] = timetest\n\ndef present(f_list, f_discrs):\n\t\tprint \"Functions available in module:\"\n\t\tfor func in f_list:\n\t\t\tprint \"\\t\"+func+\" \\t \"+f_discrs[func]\n\t\tprint \"End of list\"\n\nalllist.append(\"math_fact\")\nfuncdiscr[\"math_fact\"] = \"Factorial from \\'math\\' module\"\nfuncs[\"math_fact\"] = math.factorial\nalllist.append(\"fact\")\nfuncdiscr[\"fact\"] = \"Simple factorial implementation\"\nfuncs[\"fact\"] = factorials.fact\nalllist.append(\"fact_double\")\nfuncdiscr[\"fact_double\"] = \"Double-process factorial implementation\"\nfuncs[\"fact_double\"] = factorials.fact_double\nalllist.append(\"fact_double_perf\")\nfuncdiscr[\"fact_double_perf\"] = \"Perfected double-process factorial implementation\"\nfuncs[\"fact_double_perf\"] = factorials.fact_double_perf\nalllist.append(\"fact_double_perf_spec_reduce\")\nfuncdiscr[\"fact_double_perf_spec_reduce\"] = \"Two-process with enhanced reduce implementation\"\nfuncs[\"fact_double_perf_spec_reduce\"] = factorials.fact_double_perf_spec_reduce\nalllist.append(\"factorial\")\nfuncdiscr[\"factorial\"] = \"Optimal choice between all functions available in the module\"\nfuncs[\"factorial\"] = factorials.factorial\n\nfunclist = alllist+[\"timetest\",]\n\nrun = True\nif __name__==\"__main__\":\n\tif run:\n\t\tnum = 100000\n\t\trezs = True\n\t\thash = True\n\t\thashway = None\n\t\ttotest = [\"math_fact\",\"fact_double\",\"fact_double_perf\",\"fact_double_perf_spec_reduce\",\"factorial\",]\n\t\t#totest = [\"fact_double_perf_spec_reduce\",\"fact_double_perf_spec_reduce_slaver\",]\n\t\ttry:\n\t\t\tif len(sys.argv)>1:\n\t\t\t\tnum = int(sys.argv[1])\n\t\t\tif len(sys.argv)>2:\n\t\t\t\tif sys.argv[2].lower()==\"number\":\n\t\t\t\t\thash = False\n\t\t\t\telif sys.argv[2].lower()==\"hash\" and len(sys.argv)>3:\n\t\t\t\t\thashway = sys.argv[3]\n\t\t\t\telif sys.argv[2].lower()==\"noans\":\n\t\t\t\t\trezs = 
False\n\t\texcept:\n\t\t\tpass\n\t\ttimetest(num,totest,hash=hash,rezs=rezs,hashway=hashway)\n\t\t#timetest(4000,[\"fact_double\",\"fact_double_perf\",],hash=True,hashway=\"lol\")\n\t\t#timetest(1000,[\"fact_double_perf_spec_reduce\",\"fact_double_perf_spec_reduce_tuned\",],hash=True,hashway=lambda x:x.org())\n\telse:\n\t\tpresent(funclist, funcdiscr)\n" } ]
4
jrfarah/evolution
https://github.com/jrfarah/evolution
ea19a4d62c8262c61ba7eae3d6f5fcf989328d6c
9ddd82abbfec4d5bbac66666b2af3b7c6946448f
fbeae8b039ab46998db30c21258b8d2e015c6691
refs/heads/master
2020-04-17T05:53:31.567167
2016-08-18T13:59:42
2016-08-18T13:59:42
66,001,363
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6221084594726562, "alphanum_fraction": 0.6396682262420654, "avg_line_length": 38.267173767089844, "blob_id": "aac5094a3426cf170acfdc75a7c45259b9a20681", "content_id": "28a036cdab9f0fc82095dfc437bce6b1f9b4f99d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15433, "license_type": "no_license", "max_line_length": 218, "num_lines": 393, "path": "/main.py", "repo_name": "jrfarah/evolution", "src_encoding": "UTF-8", "text": "#--------------------------------------------------\n# Written by Joseph Farah\n# Last updated: 7/30/16\n# Evolution simulator\n# User should be able to pick number of organisms, frequency of natural disasters,\n# frequency of generation, population limit, maximum number of mutations per cycle, \n# -------------------------------------------------\n\nimport random\nimport math\nimport time\nimport string\nimport matplotlib.pyplot as plt\nimport numpy\nfrom Tkinter import *\nfrom tkFileDialog import askopenfilename as selectFILE\nimport tkMessageBox as tkmb\n\n# Constants (or defaults, depending on whether or not the program accepts input)\n#---------------------------------------------------------------------------------\n# mainloop \nmain = Tk()\n\n# constant dictionary\nc = {'NUM_ORG':10, 'OPT_OFF_NUM':5, 'NAT_DIS_FREQ':10, 'GEN_FREQ':1, \"POP_LIM\":1000,'FREQ_MUT':45, 'MAX_MUT':3, 'GEN_NUM':100}\nno = IntVar()\noon = IntVar() \nndf = IntVar()\ngf = IntVar()\npl = IntVar()\nfm = IntVar()\nmm = IntVar()\npop = IntVar()\nnatcheck = IntVar()\n\n#---------------------------------------------------------------------------------\n\n# classes\nclass element_input:\n def __init__(self, parent, CONSTANT):\n\n top = self.top = Toplevel(parent)\n con = self.con = c[CONSTANT]\n CONSTANT = self.CONSTANT = CONSTANT\n Label(top, text=\"Current value is: {0}\\nPlease enter new value for {1}\".format(con, CONSTANT)).pack()\n\n self.e = Entry(top)\n self.e.pack(padx=5)\n\n b = Button(top, text=\"submit\", command=self.enter_element)\n b.pack(pady=5)\n\n def enter_element(self):\n new_value = self.e.get()\n var_idx = gui_element_names.index(self.CONSTANT)\n c[self.CONSTANT] = int(new_value)\n\n self.top.destroy()\n\nclass generation_lists:\n def __init__(self, parent):\n\n top = self.top = Toplevel(parent)\n self.listgen = Listbox(top)\n self.listgen.pack(padx=5)\n self.listgen.insert(END, \"none\")\n self.listgen.delete(0,END)\n for generation in population_MASTER:\n self.listgen.insert(END,population_MASTER.index(generation))\n \n self.listgen.bind('<<ListboxSelect>>',self.CurSelet)\n b = Button(top, text=\"submit\", command=self.select)\n b.pack(pady=5)\n\n def select(self):\n\n self.top.destroy()\n\n def CurSelet(self, evt):\n value=str(self.listgen.get(self.listgen.curselection()))\n\n\n\n# functions\n\n\ndef defining_stuff():\n global weighted_char_list, population_MASTER, char_effect, char_list, natural_disasters, natural_disaster_chance, mutation_chance,NUM_ORG, OPT_OFF_NUM, NAT_DIS_FREQ, GEN_FREQ, POP_LIM, FREQ_MUT, MAX_MUT, GEN_NUM, c\n # defining the weighted characteristics list\n weighted_char_list = []\n # generating a weighted characteristics list that will make some chars more probable than others\n for i in xrange(1,len(characteristics)+1):\n num_char = len(characteristics)-i+1\n for x in range(num_char):\n weighted_char_list.append(i)\n # natural disasters and who survives\n nat_dist_list = [1,2,3,4,5,6]\n natural_disaster_names = {1:'landslide', 2:'blizzard', 3:'drought', 4:'lightning 
strike', 5:'hurricane', 6:'earthquake'}\n natural_disasters = {1:'4 6 7', 2:'2 3 7', 3:'1 3 7', 4:'1 4 7', 5:'2 3 6', 6:'1 3 4 6'}\n # list for the natural disaster frequency\n natural_disaster_chance = [0 for i in xrange(100)]\n for i in xrange(0,100):\n if i >= c['NAT_DIS_FREQ']:\n break\n else:\n natural_disaster_chance[i] = 1\n # mutation chance list\n mutation_chance = [0 for i in xrange(100)]\n for i in xrange(0,100):\n if i >= c['FREQ_MUT']:\n break\n else:\n mutation_chance[i] = 1\n\n\n\ndef generate(generation):\n '''this function generates the organisms, their characteristics, and their lifetimes'''\n global weighted_char_list, population_MASTER, char_effect, char_list, natural_disasters, natural_disaster_chance, mutation_chance\n # current gen should be an empty list at the beginning because the current generation doesn't exist yet\n current_gen = []\n # if we are on the first generation, create the first generation without any previous data\n if generation == 0:\n # pick a random character from the list--all the organisms will share this characteristic\n characteristic = random.choice(char_list)\n # iterate through the number of organinisms in the initial generations, established by NUM_ORG\n for org in xrange(c['NUM_ORG']):\n # create smaller lists for each organism\n # first item: organism number, denoted by org\n # second item: characteristic, denoted by characteristic\n # third item: name of the characteristic, selected from the characteristics list\n current_gen.append([org, characteristic, characteristics[characteristic]])\n return current_gen \n # if we aren't on the first generation, generate a new generation\n # begin by iterating through each organism in the PREVIOUS GENERATION\n for organism in population_MASTER[generation-1]:\n # examine the current organisms characteristic\n # this will determine the success of the organism's reproductive cycle\n org_char = organism[1]\n # value essentially represents the deviation from the optimal offspring, set by OPT_OFF_NUM\n off_change = char_effect[org_char]\n # checking if the deviation is positive or negative\n if off_change == '-':\n # if negative, subtract the optimal offspring number\n number_of_offspring = c['OPT_OFF_NUM'] - random.randint(0,c['OPT_OFF_NUM'])\n elif off_change == '+':\n # if positive, add to the optimal offspring number\n number_of_offspring = c['OPT_OFF_NUM'] + random.randint(0,c['OPT_OFF_NUM'])\n # generating the offspring for each parent\n # iterates through the number of offspring, denoted by number_of_offspring\n for offspring in xrange(0,number_of_offspring):\n # randomly selects from the weighted mutation chance list\n # 1 denotes a successful mutation, 0 denotes no mutation\n will_mutate = random.choice(mutation_chance)\n if will_mutate == 1:\n # if mutation is successful, pick a random characteristic \n # DIFFERENT from the parent's characteristic\n mutation = random.choice(weighted_char_list)\n while mutation == org_char:\n mutation = random.choice(weighted_char_list)\n # add it to the current generation\n current_gen.append([offspring,mutation,characteristics[mutation]])\n # if the organism does not mutate, it is identical to the parent. 
\n # duplicate the parent and append it to the current gen\n elif will_mutate == 0:\n current_gen.append(organism)\n # find out the size of the current generation\n population_size = len(current_gen)\n # if the population is larger than the limit, denoted by POP_LIM, make it within the limit\n # splice time!\n if population_size >= c['POP_LIM']:\n current_gen = current_gen[:c['POP_LIM']]\n return current_gen\n\ndef get_per(generation):\n \n global population_MASTER, char_list, characteristics\n # initiliaze the percentages list\n percentages = []\n\n # find the length of the current generation\n length = float(len(population_MASTER[generation]))\n # iterate through all possible characteristics, tally up organisms, and divide to find the percentages \n for attribute in char_list:\n tmp_count = 0\n for organism in population_MASTER[generation]:\n if organism[1] == attribute:\n tmp_count+=1\n percentages.append([characteristics[attribute], 100*(tmp_count/length)])\n return percentages\n\ndef get_final_per():\n '''\n get the percentages of each characteristic for the current generation.\n Call this function only!!! after population_MASTER has been filled. \n '''\n global population_MASTER, per_list\n per_list = []\n for gen in population_MASTER:\n gen_num = population_MASTER.index(gen)\n per_list.append(get_per(gen_num))\n\ndef natural_disaster(generation):\n global population_MASTER, char_list, natural_disasters, natural_disaster_names, nat_dist_list, natlist\n gen = population_MASTER[generation]\n nat_dist_type = random.choice(nat_dist_list)\n who_survives = natural_disasters[nat_dist_type].split()\n who_survives = [int(s) for s in who_survives]\n for organism in population_MASTER[generation]:\n if organism[1] not in who_survives:\n population_MASTER[generation].remove(organism)\n else:\n pass\n natlist.append([natural_disaster_names[nat_dist_type],generation])\n return population_MASTER\n\n# button functions\n\ndef constant_change(constant):\n element_input(main, constant)\n\n\ndef graph():\n '''\n graph the evolution according to the specifications set by the user\n '''\n global per_list, natlist\n y = []\n approved_list = ['null']\n if no.get() == 1:\n approved_list.append(characteristics[1])\n if oon.get() == 1:\n approved_list.append(characteristics[2]) \n if ndf.get() == 1:\n approved_list.append(characteristics[3])\n if gf.get() == 1:\n approved_list.append(characteristics[4])\n if pl.get() == 1:\n approved_list.append(characteristics[5])\n if fm.get() == 1:\n approved_list.append(characteristics[6])\n if mm.get() == 1:\n approved_list.append(characteristics[7])\n\n\n x = []\n print characteristics[1]\n fig, ax = plt.subplots()\n plt.ion()\n for char in char_list:\n y_list = []\n for gen in per_list: \n y_list.append(gen[char-1][1])\n y.append([characteristics[char],y_list])\n for i in range(0,len(population_MASTER)):\n x.append(i)\n population_num = []\n for generation in population_MASTER:\n population_num.append(len(generation)/(10*(c['POP_LIM']/1000)))\n if pop.get() == 1:\n ax.plot(x,population_num, linestyle='dashed', label='Population (scaled)')\n for dataset in y:\n if dataset[0] in approved_list:\n ax.plot(x,dataset[1],label=dataset[0])\n plt.pause(0.1)\n for ND in natlist:\n ax.text(ND[1],90, '-{0}'.format(ND[0]),rotation=45)\n if natcheck.get() == 1:\n ax.axvline(x=ND[1], linewidth=1, color='k')\n\n\n # making the legend\n legend = ax.legend(loc='upper right', shadow=True)\n for label in legend.get_texts():\n label.set_fontsize('small')\n\n plt.show()\n # 
generation_lists(main)\n\n\n\ndef main_function():\n '''generate population master, including all effects to the population'''\n global c, population_MASTER, nat_dist_check, natural_disaster_chance, per_list\n del population_MASTER[:]\n defining_stuff()\n for i in range(0,c['GEN_NUM']):\n population_MASTER.append(generate(i))\n nat_dist_check = random.choice(natural_disaster_chance)\n if nat_dist_check == 1:\n population_MASTER = natural_disaster(i)\n #time.sleep(c['GEN_FREQ'])\n get_final_per()\n tkmb.showinfo(\"Process Completed\",\"Process complete, EVOLUTION terminated\")\n\ndef get_profile():\n '''load profile from file'''\n global gui_element_names\n profile_filepath = selectFILE()\n with open(profile_filepath) as f:\n profile = f.readlines()\n f.close()\n for i in range(len(gui_element_names)):\n c[gui_element_names[i]] = int(profile[i])\n tkmb.showinfo(\"Process Completed\",\"Current Profile Loaded\")\n\ndef export_profile():\n '''export profile to file'''\n global gui_element_names\n profilename = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(5))+'.profile'\n with open(profilename, 'w') as o:\n for i in range(len(gui_element_names)):\n o.write(\"%s\\n\" % c[gui_element_names[i]])\n o.close()\n tkmb.showinfo(\"Process Completed\",\"Profile Exported\")\n\n\n\n\n# main portion of program\n\n# Lists and dictionaries\n#---------------------------------------------------------------------------------\ncharacteristics = {1:'heat-resistant', 2:'cold-resistant', 3:'energy-efficient', 4:'fast', 5:'slow', 6:'big', 7:'small', 8:'attractive'}\n# character list for iteration\nchar_list = [1,2,3,4,5,6,7]\n# defining which characteristics lead to an increase, decrease, or no change in reproductive activity\nchar_effect = {1:'-', 2:'+',3:'+',4:'+',5:'-',6:'+',7:'-',8:'+'}\nnat_dist_list = [1,2,3,4,5,6]\nnatural_disaster_names = {1:'landslide', 2:'blizzard', 3:'drought', 4:'lightning strike', 5:'hurricane', 6:'earthquake'}\nnatural_disasters = {1:'4 6 7', 2:'2 3 7', 3:'1 3 7', 4:'1 4 7', 5:'2 3 6', 6:'1 3 4 6'}\n# the master list that contains all generations\npopulation_MASTER = []\n# percentage lists\nper_list = []\nnatlist = []\n# defining of GUI elements\ngui_element_names = ['NUM_ORG', 'OPT_OFF_NUM', 'NAT_DIS_FREQ', 'GEN_FREQ', 'POP_LIM', 'FREQ_MUT', 'MAX_MUT', 'GEN_NUM']\nr = 0\ncc = 0\nfor element in gui_element_names:\n Label(main, text=element).grid(row=r,column=cc)\n r += 1\nr = 0\nfor char in char_list:\n Label(main, text=characteristics[char]+'?').grid(row=r, column=3)\n r +=1\n\nmenubar = Menu(main)\nmenubar.add_command(label=\"Quit!\", command=main.quit)\nmenubar.add_command(label=\"Load Profile\", command=get_profile)\nmenubar.add_command(label=\"Export Profile\", command=export_profile)\n\n\nButton(main,text='NUM_ORG', command=lambda:constant_change('NUM_ORG')).grid(row = 0, column=1)\nButton(main,text='OPT_OFF_NUM', command=lambda:constant_change('OPT_OFF_NUM')).grid(row = 1, column=1)\nButton(main,text='NAT_DIS_FREQ', command=lambda:constant_change('NAT_DIS_FREQ')).grid(row = 2, column=1)\nButton(main,text='GEN_FREQ', command=lambda:constant_change('GEN_FREQ')).grid(row = 3, column=1)\nButton(main,text='POP_LIM', command=lambda:constant_change('POP_LIM')).grid(row = 4, column=1)\nButton(main,text='FREQ_MUT', command=lambda:constant_change('FREQ_MUT')).grid(row = 5, column=1)\nButton(main,text='MAX_MUT', command=lambda:constant_change('MAX_MUT')).grid(row = 6, column=1)\nButton(main,text='GEN_NUM', command=lambda:constant_change('GEN_NUM')).grid(row = 
7, column=1)\nButton(main,text='EXECUTE MAIN',command=main_function).grid(row=8,column=0)\nButton(main,text='GRAPH',command=graph).grid(row=8,column=1)\na = Checkbutton(main, text=\"<---Graph\", variable=no)\na.grid(row=0, column=2, sticky=W)\na.toggle()\nb=Checkbutton(main, text=\"<---Graph\", variable=oon)\nb.grid(row=1, column=2, sticky=W)\nb.toggle()\nc1 = Checkbutton(main, text=\"<---Graph\", variable=ndf)\nc1.grid(row=2, column=2, sticky=W)\nc1.toggle()\nk1=Checkbutton(main, text=\"<---Graph\", variable=gf)\nk1.grid(row=3, column=2, sticky=W)\nk1.toggle()\nd1=Checkbutton(main, text=\"<---Graph\", variable=pl)\nd1.grid(row=4, column=2, sticky=W)\nd1.toggle()\ne1=Checkbutton(main, text=\"<---Graph\", variable=fm)\ne1.grid(row=5, column=2, sticky=W)\ne1.toggle()\nf1=Checkbutton(main, text=\"<---Graph\", variable=mm)\nf1.grid(row=6, column=2, sticky=W)\nf1.toggle()\ng1=Checkbutton(main, text=\"Graph pop\", variable=pop)\ng1.grid(row=7, column=2, sticky=W)\ng1.toggle()\nh1=Checkbutton(main, text=\"Show natural disaster lines?\", variable=natcheck)\nh1.grid(row=8, column=2, sticky=W, columnspan=2)\n\nmain.config(menu=menubar)\nmain.mainloop()\n\n" } ]
1
ZachOrr/google-cloud-datastore-stub
https://github.com/ZachOrr/google-cloud-datastore-stub
ddd124d67680b2957fa1820b05c35258c4a528d0
ca1ef73a0a84fa5de3e3defcbfecbf6c9889d18e
0c3532a30bd90985e6efa87d64196327fcc8f7c4
refs/heads/master
2023-02-24T13:53:06.960824
2020-06-07T15:41:45
2020-06-07T15:41:45
270,352,335
0
0
MIT
2020-06-07T15:43:37
2020-06-07T15:42:00
2020-06-07T15:41:57
null
[ { "alpha_fraction": 0.5106140971183777, "alphanum_fraction": 0.515163004398346, "avg_line_length": 27.06382942199707, "blob_id": "6c8d02f40f98050bface85f62a8f11afb5c6c28b", "content_id": "d1e52290c69c1f194df5a99d32646233d4d41506", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2638, "license_type": "permissive", "max_line_length": 85, "num_lines": 94, "path": "/stubs/google/cloud/datastore_v1/gapic/datastore_client.pyi", "repo_name": "ZachOrr/google-cloud-datastore-stub", "src_encoding": "UTF-8", "text": "from google.cloud.datastore_v1.gapic import (\n datastore_client_config as datastore_client_config,\n enums as enums,\n)\nfrom google.cloud.datastore_v1.gapic.transports import (\n datastore_grpc_transport as datastore_grpc_transport,\n)\nfrom google.cloud.datastore_v1.proto import (\n datastore_pb2 as datastore_pb2,\n datastore_pb2_grpc as datastore_pb2_grpc,\n entity_pb2 as entity_pb2,\n query_pb2 as query_pb2,\n)\nfrom google.oauth2 import service_account as service_account\nfrom typing import Any, Optional\n\nclass DatastoreClient:\n SERVICE_ADDRESS: str = ...\n @classmethod\n def from_service_account_file(cls, filename: Any, *args: Any, **kwargs: Any): ...\n from_service_account_json: Any = ...\n transport: Any = ...\n def __init__(\n self,\n transport: Optional[Any] = ...,\n channel: Optional[Any] = ...,\n credentials: Optional[Any] = ...,\n client_config: Optional[Any] = ...,\n client_info: Optional[Any] = ...,\n client_options: Optional[Any] = ...,\n ) -> None: ...\n def lookup(\n self,\n project_id: Any,\n keys: Any,\n read_options: Optional[Any] = ...,\n retry: Any = ...,\n timeout: Any = ...,\n metadata: Optional[Any] = ...,\n ): ...\n def run_query(\n self,\n project_id: Any,\n partition_id: Any,\n read_options: Optional[Any] = ...,\n query: Optional[Any] = ...,\n gql_query: Optional[Any] = ...,\n retry: Any = ...,\n timeout: Any = ...,\n metadata: Optional[Any] = ...,\n ): ...\n def begin_transaction(\n self,\n project_id: Any,\n transaction_options: Optional[Any] = ...,\n retry: Any = ...,\n timeout: Any = ...,\n metadata: Optional[Any] = ...,\n ): ...\n def commit(\n self,\n project_id: Any,\n mode: Any,\n mutations: Any,\n transaction: Optional[Any] = ...,\n retry: Any = ...,\n timeout: Any = ...,\n metadata: Optional[Any] = ...,\n ): ...\n def rollback(\n self,\n project_id: Any,\n transaction: Any,\n retry: Any = ...,\n timeout: Any = ...,\n metadata: Optional[Any] = ...,\n ): ...\n def allocate_ids(\n self,\n project_id: Any,\n keys: Any,\n retry: Any = ...,\n timeout: Any = ...,\n metadata: Optional[Any] = ...,\n ): ...\n def reserve_ids(\n self,\n project_id: Any,\n keys: Any,\n database_id: Optional[Any] = ...,\n retry: Any = ...,\n timeout: Any = ...,\n metadata: Optional[Any] = ...,\n ): ...\n" }, { "alpha_fraction": 0.7682119011878967, "alphanum_fraction": 0.7748344540596008, "avg_line_length": 20.571428298950195, "blob_id": "e0f8da8d36b03ac0cf5c65cd8c8a70c5b96f4d38", "content_id": "647d2ffe173baa64063d4b158956783c255e4fe0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 151, "license_type": "permissive", "max_line_length": 43, "num_lines": 7, "path": "/InMemoryCloudDatastoreStub/_stored_object.py", "repo_name": "ZachOrr/google-cloud-datastore-stub", "src_encoding": "UTF-8", "text": "from google.cloud.datastore_v1 import types\nfrom typing import NamedTuple\n\n\nclass _StoredObject(NamedTuple):\n version: int\n entity: 
types.Entity\n" }, { "alpha_fraction": 0.5795847773551941, "alphanum_fraction": 0.5807381868362427, "avg_line_length": 29.421052932739258, "blob_id": "101bc30a15630ce51ee77cadd6c42903ce51cbbf", "content_id": "56dfdf48bd4f36ddc296b13863f6d15aa4e4c0a5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1734, "license_type": "permissive", "max_line_length": 85, "num_lines": 57, "path": "/stubs/google/cloud/datastore/key.pyi", "repo_name": "ZachOrr/google-cloud-datastore-stub", "src_encoding": "UTF-8", "text": "from google.cloud._helpers import _to_bytes as _to_bytes\nfrom google.cloud.datastore import _app_engine_key_pb2 as _app_engine_key_pb2\nfrom typing import Any, Optional\n\n_DATABASE_ID_TEMPLATE: str\n_BAD_ELEMENT_TEMPLATE: str\n_EMPTY_ELEMENT: str\n\nclass Key:\n _flat_path: Any = ...\n _namespace: Any = ...\n _project: Any = ...\n _path: Any = ...\n def __init__(self, *path_args: Any, **kwargs: Any) -> None: ...\n def __eq__(self, other: Any) -> Any: ...\n def __ne__(self, other: Any) -> Any: ...\n def __hash__(self) -> Any: ...\n @staticmethod\n def _parse_path(path_args: Any): ...\n def _combine_args(self): ...\n def _clone(self): ...\n def completed_key(self, id_or_name: Any): ...\n def to_protobuf(self): ...\n def to_legacy_urlsafe(self, location_prefix: Optional[Any] = ...): ...\n @classmethod\n def from_legacy_urlsafe(cls, urlsafe: Any): ...\n @property\n def is_partial(self): ...\n @property\n def namespace(self): ...\n @property\n def path(self): ...\n @property\n def flat_path(self): ...\n @property\n def kind(self): ...\n @property\n def id(self): ...\n @property\n def name(self): ...\n @property\n def id_or_name(self): ...\n @property\n def project(self): ...\n def _make_parent(self): ...\n _parent: Any = ...\n @property\n def parent(self): ...\n def __repr__(self): ...\n\ndef _validate_project(project: Any, parent: Any): ...\ndef _clean_app(app_str: Any): ...\ndef _get_empty(value: Any, empty_value: Any): ...\ndef _check_database_id(database_id: Any) -> None: ...\ndef _add_id_or_name(flat_path: Any, element_pb: Any, empty_allowed: Any) -> None: ...\ndef _get_flat_path(path_pb: Any): ...\ndef _to_legacy_path(dict_path: Any): ...\n" }, { "alpha_fraction": 0.7423208355903625, "alphanum_fraction": 0.7448805570602417, "avg_line_length": 30.675676345825195, "blob_id": "cc96bc8b2e75d159e6c19491e851349468589a2b", "content_id": "8dfc519935480d57040137bdac17668af52a0f23", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1172, "license_type": "permissive", "max_line_length": 90, "num_lines": 37, "path": "/tests/conftest.py", "repo_name": "ZachOrr/google-cloud-datastore-stub", "src_encoding": "UTF-8", "text": "import pytest\nfrom google.cloud import ndb\nfrom google.cloud.datastore_v1.proto import datastore_pb2_grpc\nfrom google.cloud.ndb import _datastore_api\nfrom _pytest.monkeypatch import MonkeyPatch\n\nfrom InMemoryCloudDatastoreStub import datastore_stub\n\n\n@pytest.fixture(autouse=True)\ndef init_ndb_env_vars(monkeypatch: MonkeyPatch) -> None:\n \"\"\"\n Initializing an ndb Client in a test env requires some environment variables to be set\n For now, these are just garbage values intended to give the library _something_\n (we don't expect them to actually work yet)\n \"\"\"\n\n monkeypatch.setenv(\"DATASTORE_EMULATOR_HOST\", \"localhost\")\n monkeypatch.setenv(\"DATASTORE_DATASET\", \"datastore-stub-test\")\n\n\n@pytest.fixture(autouse=True)\ndef 
ndb_stub(monkeypatch: MonkeyPatch) -> datastore_stub.LocalDatastoreStub:\n stub = datastore_stub.LocalDatastoreStub()\n\n def mock_stub() -> datastore_pb2_grpc.DatastoreStub:\n return stub\n\n monkeypatch.setattr(_datastore_api, \"stub\", mock_stub)\n return stub\n\n\n@pytest.fixture(autouse=True)\ndef ndb_context(init_ndb_env_vars):\n client = ndb.Client()\n with client.context() as context:\n yield context\n" }, { "alpha_fraction": 0.5502282977104187, "alphanum_fraction": 0.5502282977104187, "avg_line_length": 27.25806427001953, "blob_id": "f8b0daebb5a49143c1c7c72a2246ac0f621d676f", "content_id": "09ded05848d8fd0d6908e15f0e6e4b941ca62317", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 876, "license_type": "permissive", "max_line_length": 81, "num_lines": 31, "path": "/stubs/google/cloud/ndb/_gql.pyi", "repo_name": "ZachOrr/google-cloud-datastore-stub", "src_encoding": "UTF-8", "text": "from google.cloud.ndb import exceptions as exceptions, key as key, model as model\nfrom typing import Any, Optional\n\nclass GQL:\n TOKENIZE_REGEX: Any = ...\n RESERVED_KEYWORDS: Any = ...\n def __init__(\n self,\n query_string: Any,\n _app: Optional[Any] = ...,\n _auth_domain: Optional[Any] = ...,\n namespace: Optional[Any] = ...,\n ) -> None: ...\n def filters(self): ...\n def hint(self): ...\n def limit(self): ...\n def offset(self): ...\n def orderings(self): ...\n def is_keys_only(self): ...\n def projection(self): ...\n def is_distinct(self): ...\n def kind(self): ...\n def query_filters(self, model_class: Any, filters: Any): ...\n def get_query(self): ...\n\nclass Literal:\n def __init__(self, value: Any) -> None: ...\n def Get(self): ...\n def __eq__(self, other: Any) -> Any: ...\n\nFUNCTIONS: Any\n" }, { "alpha_fraction": 0.7411764860153198, "alphanum_fraction": 0.7490196228027344, "avg_line_length": 35.42856979370117, "blob_id": "8280cb9d660633b87f3eb07a1417900cfacd5778", "content_id": "4ab97bdd192e989a039093f640f954fcd768839d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 255, "license_type": "permissive", "max_line_length": 76, "num_lines": 7, "path": "/stubs/google/cloud/datastore_v1/__init__.pyi", "repo_name": "ZachOrr/google-cloud-datastore-stub", "src_encoding": "UTF-8", "text": "from google.cloud.datastore_v1 import types as types\nfrom google.cloud.datastore_v1.gapic import datastore_client, enums as enums\nfrom typing import Any\n\nclass DatastoreClient(datastore_client.DatastoreClient):\n __doc__: Any = ...\n enums: Any = ...\n" }, { "alpha_fraction": 0.7564767003059387, "alphanum_fraction": 0.7668393850326538, "avg_line_length": 16.545454025268555, "blob_id": "d28053b6acfc19899b58b4798f86b89fb2ba13dc", "content_id": "e768c37facfb3f01994f78cc90426ee2831bdfd5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 193, "license_type": "permissive", "max_line_length": 60, "num_lines": 11, "path": "/stubs/google/cloud/datastore/_app_engine_key_pb2.pyi", "repo_name": "ZachOrr/google-cloud-datastore-stub", "src_encoding": "UTF-8", "text": "from google.protobuf import descriptor_pb2 as descriptor_pb2\nfrom typing import Any\n\n_b: Any\n_sym_db: Any\nDESCRIPTOR: Any\n_REFERENCE: Any\n_PATH_ELEMENT: Any\n_PATH: Any\nReference: Any\nPath: Any\n" }, { "alpha_fraction": 0.6767676472663879, "alphanum_fraction": 0.6767676472663879, "avg_line_length": 32, "blob_id": "b73161720cb83e20078f88557ab6f9a70dd3b9c3", 
"content_id": "0bbcd3753f844f9ba9818444a345169ba889bd34", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 99, "license_type": "permissive", "max_line_length": 64, "num_lines": 3, "path": "/stubs/google/cloud/ndb/_batch.pyi", "repo_name": "ZachOrr/google-cloud-datastore-stub", "src_encoding": "UTF-8", "text": "from typing import Any, Optional\n\ndef get_batch(batch_cls: Any, options: Optional[Any] = ...): ...\n" }, { "alpha_fraction": 0.5799256563186646, "alphanum_fraction": 0.5799256563186646, "avg_line_length": 32.625, "blob_id": "788ab16b5df635452ec0f78786112167245727c4", "content_id": "fec9b8148fe49b1156e2d705d32eb6ff359cedcb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 269, "license_type": "permissive", "max_line_length": 53, "num_lines": 8, "path": "/stubs/google/cloud/ndb/_datastore_types.pyi", "repo_name": "ZachOrr/google-cloud-datastore-stub", "src_encoding": "UTF-8", "text": "from google.cloud.ndb import exceptions as exceptions\nfrom typing import Any\n\nclass BlobKey:\n def __init__(self, blob_key: Any) -> None: ...\n def __eq__(self, other: Any) -> Any: ...\n def __lt__(self, other: Any) -> Any: ...\n def __hash__(self) -> Any: ...\n" }, { "alpha_fraction": 0.5856777429580688, "alphanum_fraction": 0.5882353186607361, "avg_line_length": 29.076923370361328, "blob_id": "921c4b5fd174b518d091806561d0d9e3ede46ad0", "content_id": "facd39100dcb51de07990b189d83795e7d6d1812", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2737, "license_type": "permissive", "max_line_length": 80, "num_lines": 91, "path": "/stubs/google/cloud/ndb/_datastore_query.pyi", "repo_name": "ZachOrr/google-cloud-datastore-stub", "src_encoding": "UTF-8", "text": "from google.cloud.datastore import helpers as helpers\nfrom google.cloud.datastore_v1.proto import (\n datastore_pb2 as datastore_pb2,\n entity_pb2 as entity_pb2,\n query_pb2 as query_pb2,\n)\nfrom google.cloud.ndb import (\n exceptions as exceptions,\n model as model,\n tasklets as tasklets,\n)\nfrom typing import Any, Optional\n\nlog: Any\nMoreResultsType: Any\nMORE_RESULTS_TYPE_NOT_FINISHED: Any\nMORE_RESULTS_AFTER_LIMIT: Any\nResultType: Any\nRESULT_TYPE_FULL: Any\nRESULT_TYPE_KEY_ONLY: Any\nRESULT_TYPE_PROJECTION: Any\nDOWN: Any\nUP: Any\nFILTER_OPERATORS: Any\n\ndef make_filter(name: Any, op: Any, value: Any): ...\ndef make_composite_and_filter(filter_pbs: Any): ...\ndef fetch(query: Any) -> None: ...\ndef iterate(query: Any, raw: bool = ...): ...\n\nclass QueryIterator:\n def __iter__(self) -> Any: ...\n def has_next(self) -> None: ...\n def has_next_async(self) -> None: ...\n def probably_has_next(self) -> None: ...\n def next(self) -> None: ...\n def cursor_before(self) -> None: ...\n def cursor_after(self) -> None: ...\n def index_list(self) -> None: ...\n\nclass _QueryIteratorImpl(QueryIterator):\n def __init__(self, query: Any, raw: bool = ...) -> None: ...\n def has_next(self): ...\n def has_next_async(self) -> None: ...\n def probably_has_next(self): ...\n def next(self): ...\n __next__: Any = ...\n def cursor_before(self): ...\n def cursor_after(self): ...\n\nclass _PostFilterQueryIteratorImpl(QueryIterator):\n def __init__(self, query: Any, predicate: Any, raw: bool = ...) 
-> None: ...\n def has_next(self): ...\n def has_next_async(self) -> None: ...\n def probably_has_next(self): ...\n def next(self): ...\n __next__: Any = ...\n def cursor_before(self): ...\n def cursor_after(self): ...\n\nclass _MultiQueryIteratorImpl(QueryIterator):\n def __init__(self, query: Any, raw: bool = ...) -> None: ...\n def has_next(self): ...\n def has_next_async(self) -> None: ...\n def probably_has_next(self): ...\n def next(self): ...\n __next__: Any = ...\n def cursor_before(self) -> None: ...\n def cursor_after(self) -> None: ...\n\nclass _Result:\n result_type: Any = ...\n result_pb: Any = ...\n order_by: Any = ...\n cursor: Any = ...\n def __init__(\n self, result_type: Any, result_pb: Any, order_by: Optional[Any] = ...\n ) -> None: ...\n def __lt__(self, other: Any) -> Any: ...\n def __eq__(self, other: Any) -> Any: ...\n def entity(self): ...\n\nclass Cursor:\n @classmethod\n def from_websafe_string(cls, urlsafe: Any): ...\n cursor: Any = ...\n def __init__(\n self, cursor: Optional[Any] = ..., urlsafe: Optional[Any] = ...\n ) -> None: ...\n def to_websafe_string(self): ...\n def urlsafe(self): ...\n" }, { "alpha_fraction": 0.8230088353157043, "alphanum_fraction": 0.8230088353157043, "avg_line_length": 20.5238094329834, "blob_id": "688a0858560b459514c37e458ac0bcc055ef6943", "content_id": "ca38266ea37ad14b882583bd4564ab722be2b80e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 452, "license_type": "permissive", "max_line_length": 30, "num_lines": 21, "path": "/stubs/google/cloud/datastore_v1/proto/datastore_pb2.pyi", "repo_name": "ZachOrr/google-cloud-datastore-stub", "src_encoding": "UTF-8", "text": "from typing import Any\n\nDESCRIPTOR: Any\nLookupRequest = Any\nLookupResponse = Any\nRunQueryRequest = Any\nRunQueryResponse = Any\nBeginTransactionRequest = Any\nBeginTransactionResponse = Any\nRollbackRequest = Any\nRollbackResponse = Any\nCommitRequest = Any\nCommitResponse = Any\nAllocateIdsRequest = Any\nAllocateIdsResponse = Any\nReserveIdsRequest = Any\nReserveIdsResponse = Any\nMutation = Any\nMutationResult = Any\nReadOptions = Any\nTransactionOptions = Any\n" }, { "alpha_fraction": 0.6082473993301392, "alphanum_fraction": 0.6082473993301392, "avg_line_length": 26.714284896850586, "blob_id": "f80ba156fb1d95fab1eaf2bcb9d36890e0f549c0", "content_id": "d04c7a2978d9834c5f515d7cc814b720f90eb82e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 194, "license_type": "permissive", "max_line_length": 62, "num_lines": 7, "path": "/stubs/google/cloud/ndb/msgprop.pyi", "repo_name": "ZachOrr/google-cloud-datastore-stub", "src_encoding": "UTF-8", "text": "from typing import Any\n\nclass EnumProperty:\n def __init__(self, *args: Any, **kwargs: Any) -> None: ...\n\nclass MessageProperty:\n def __init__(self, *args: Any, **kwargs: Any) -> None: ...\n" }, { "alpha_fraction": 0.5836299061775208, "alphanum_fraction": 0.5836299061775208, "avg_line_length": 30.22222137451172, "blob_id": "207f06064aa7233ec457eae345415c6b0b65d572", "content_id": "1609dcfda45960bfcf3893a49f87c3e7dcc7e26e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 562, "license_type": "permissive", "max_line_length": 79, "num_lines": 18, "path": "/stubs/google/cloud/ndb/_options.pyi", "repo_name": "ZachOrr/google-cloud-datastore-stub", "src_encoding": "UTF-8", "text": "from google.cloud.ndb import exceptions as 
exceptions\nfrom typing import Any, Optional\n\nlog: Any\n\nclass Options:\n @classmethod\n def options(cls, wrapped: Any): ...\n @classmethod\n def slots(cls): ...\n def __init__(self, config: Optional[Any] = ..., **kwargs: Any) -> None: ...\n def __eq__(self, other: Any) -> Any: ...\n def __ne__(self, other: Any) -> Any: ...\n def copy(self, **kwargs: Any): ...\n def items(self) -> None: ...\n\nclass ReadOptions(Options):\n def __init__(self, config: Optional[Any] = ..., **kwargs: Any) -> None: ...\n" }, { "alpha_fraction": 0.8329113721847534, "alphanum_fraction": 0.8329113721847534, "avg_line_length": 42.88888931274414, "blob_id": "a173783ecc92c97a815a2d6bc5d1ddce7e061e2c", "content_id": "4672662c689f2ada084bb7139858f0a112a38aad", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 395, "license_type": "permissive", "max_line_length": 73, "num_lines": 9, "path": "/stubs/google/cloud/datastore/__init__.pyi", "repo_name": "ZachOrr/google-cloud-datastore-stub", "src_encoding": "UTF-8", "text": "from google.cloud.datastore.batch import Batch as Batch\nfrom google.cloud.datastore.client import Client as Client\nfrom google.cloud.datastore.entity import Entity as Entity\nfrom google.cloud.datastore.key import Key as Key\nfrom google.cloud.datastore.query import Query as Query\nfrom google.cloud.datastore.transaction import Transaction as Transaction\nfrom typing import Any\n\n__version__: Any\n" }, { "alpha_fraction": 0.7579250931739807, "alphanum_fraction": 0.7579250931739807, "avg_line_length": 16.350000381469727, "blob_id": "348fb2db8c9a035d52d326e9823bee80ac0241b2", "content_id": "023ef1eca71cae2655653ac0f3cd09bce72a112d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 347, "license_type": "permissive", "max_line_length": 29, "num_lines": 20, "path": "/mypy.ini", "repo_name": "ZachOrr/google-cloud-datastore-stub", "src_encoding": "UTF-8", "text": "[mypy]\nwarn_redundant_casts = True\nwarn_unused_ignores = True\nmypy_path=stubs\n\n[mypy-pytest.*]\nignore_missing_imports = True\nignore_errors=True\n\n[mypy-grpc.*]\nignore_missing_imports = True\nignore_errors=True\n\n[mypy-_pytest.*]\nignore_missing_imports = True\nignore_errors=True\n\n[mypy-google.cloud.*]\nignore_missing_imports = True\nignore_errors=True\n" }, { "alpha_fraction": 0.7387387156486511, "alphanum_fraction": 0.7387387156486511, "avg_line_length": 12.875, "blob_id": "cdc17c01de7f39fe51e342256695c76b564aea7f", "content_id": "a82ff81c37528da3987bf0904dd8005560b582fd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 111, "license_type": "permissive", "max_line_length": 22, "num_lines": 8, "path": "/stubs/google/cloud/datastore_v1/proto/entity_pb2.pyi", "repo_name": "ZachOrr/google-cloud-datastore-stub", "src_encoding": "UTF-8", "text": "from typing import Any\n\nDESCRIPTOR = Any\nPartitionId = Any\nKey = Any\nArrayValue = Any\nValue = Any\nEntity = Any\n" }, { "alpha_fraction": 0.5441374778747559, "alphanum_fraction": 0.5441374778747559, "avg_line_length": 30.913978576660156, "blob_id": "c385625cf865ec1a3be14e3a39037555196e362b", "content_id": "7df1672c9240fc7220b081c93d0820204bf1c69d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11872, "license_type": "permissive", "max_line_length": 78, "num_lines": 372, "path": "/stubs/google/cloud/ndb/model.pyi", "repo_name": 
"ZachOrr/google-cloud-datastore-stub", "src_encoding": "UTF-8", "text": "import six\nfrom google.cloud.ndb import exceptions, key as key_module\nfrom typing import Any, Optional\n\nKey = key_module.Key\nBlobKey: Any\nGeoPt: Any\nRollback = exceptions.Rollback\n\nclass KindError(exceptions.BadValueError): ...\nclass InvalidPropertyError(exceptions.Error): ...\n\nBadProjectionError = InvalidPropertyError\n\nclass UnprojectedPropertyError(exceptions.Error): ...\nclass ReadonlyPropertyError(exceptions.Error): ...\nclass ComputedPropertyError(ReadonlyPropertyError): ...\nclass UserNotFoundError(exceptions.Error): ...\n\nclass _NotEqualMixin:\n def __ne__(self, other: Any) -> Any: ...\n\nclass IndexProperty(_NotEqualMixin):\n def __new__(cls, name: Any, direction: Any): ...\n @property\n def name(self): ...\n @property\n def direction(self): ...\n def __eq__(self, other: Any) -> Any: ...\n def __hash__(self) -> Any: ...\n\nclass Index(_NotEqualMixin):\n def __new__(cls, kind: Any, properties: Any, ancestor: Any): ...\n @property\n def kind(self): ...\n @property\n def properties(self): ...\n @property\n def ancestor(self): ...\n def __eq__(self, other: Any) -> Any: ...\n def __hash__(self) -> Any: ...\n\nclass IndexState(_NotEqualMixin):\n def __new__(cls, definition: Any, state: Any, id: Any): ...\n @property\n def definition(self): ...\n @property\n def state(self): ...\n @property\n def id(self): ...\n def __eq__(self, other: Any) -> Any: ...\n def __hash__(self) -> Any: ...\n\nclass ModelAdapter:\n def __new__(self, *args: Any, **kwargs: Any) -> None: ...\n\ndef make_connection(*args: Any, **kwargs: Any) -> None: ...\n\nclass ModelAttribute: ...\n\nclass _BaseValue(_NotEqualMixin):\n b_val: Any = ...\n def __init__(self, b_val: Any) -> None: ...\n def __eq__(self, other: Any) -> Any: ...\n def __hash__(self) -> Any: ...\n\nclass Property(ModelAttribute):\n def __init__(\n self,\n name: Optional[Any] = ...,\n indexed: Optional[Any] = ...,\n repeated: Optional[Any] = ...,\n required: Optional[Any] = ...,\n default: Optional[Any] = ...,\n choices: Optional[Any] = ...,\n validator: Optional[Any] = ...,\n verbose_name: Optional[Any] = ...,\n write_empty_list: Optional[Any] = ...,\n ) -> None: ...\n def __eq__(self, value: Any) -> Any: ...\n def __ne__(self, value: Any) -> Any: ...\n def __lt__(self, value: Any) -> Any: ...\n def __le__(self, value: Any) -> Any: ...\n def __gt__(self, value: Any) -> Any: ...\n def __ge__(self, value: Any) -> Any: ...\n IN: Any = ...\n def __neg__(self): ...\n def __pos__(self): ...\n def __get__(self, entity: Any, unused_cls: Optional[Any] = ...): ...\n def __set__(self, entity: Any, value: Any) -> None: ...\n def __delete__(self, entity: Any) -> None: ...\n\nclass ModelKey(Property):\n def __init__(self) -> None: ...\n\nclass BooleanProperty(Property): ...\nclass IntegerProperty(Property): ...\nclass FloatProperty(Property): ...\n\nclass _CompressedValue(six.binary_type):\n z_val: Any = ...\n def __init__(self, z_val: Any) -> None: ...\n def __eq__(self, other: Any) -> Any: ...\n def __hash__(self) -> Any: ...\n\nclass BlobProperty(Property):\n def __init__(\n self,\n name: Optional[Any] = ...,\n compressed: Optional[Any] = ...,\n indexed: Optional[Any] = ...,\n repeated: Optional[Any] = ...,\n required: Optional[Any] = ...,\n default: Optional[Any] = ...,\n choices: Optional[Any] = ...,\n validator: Optional[Any] = ...,\n verbose_name: Optional[Any] = ...,\n write_empty_list: Optional[Any] = ...,\n ) -> None: ...\n\nclass CompressedTextProperty(BlobProperty):\n 
def __init__(self, *args: Any, **kwargs: Any) -> None: ...\n\nclass TextProperty(Property):\n def __new__(cls, *args: Any, **kwargs: Any): ...\n def __init__(self, *args: Any, **kwargs: Any) -> None: ...\n\nclass StringProperty(TextProperty):\n def __init__(self, *args: Any, **kwargs: Any) -> None: ...\n\nclass GeoPtProperty(Property): ...\nclass PickleProperty(BlobProperty): ...\n\nclass JsonProperty(BlobProperty):\n def __init__(\n self,\n name: Optional[Any] = ...,\n compressed: Optional[Any] = ...,\n json_type: Optional[Any] = ...,\n indexed: Optional[Any] = ...,\n repeated: Optional[Any] = ...,\n required: Optional[Any] = ...,\n default: Optional[Any] = ...,\n choices: Optional[Any] = ...,\n validator: Optional[Any] = ...,\n verbose_name: Optional[Any] = ...,\n write_empty_list: Optional[Any] = ...,\n ) -> None: ...\n\nclass User:\n def __init__(\n self,\n email: Optional[Any] = ...,\n _auth_domain: Optional[Any] = ...,\n _user_id: Optional[Any] = ...,\n ) -> None: ...\n def nickname(self): ...\n def email(self): ...\n def user_id(self): ...\n def auth_domain(self): ...\n def __hash__(self) -> Any: ...\n def __eq__(self, other: Any) -> Any: ...\n def __lt__(self, other: Any) -> Any: ...\n\nclass UserProperty(Property):\n def __init__(\n self,\n name: Optional[Any] = ...,\n auto_current_user: Optional[Any] = ...,\n auto_current_user_add: Optional[Any] = ...,\n indexed: Optional[Any] = ...,\n repeated: Optional[Any] = ...,\n required: Optional[Any] = ...,\n default: Optional[Any] = ...,\n choices: Optional[Any] = ...,\n validator: Optional[Any] = ...,\n verbose_name: Optional[Any] = ...,\n write_empty_list: Optional[Any] = ...,\n ) -> None: ...\n\nclass KeyProperty(Property):\n def __init__(\n self,\n name: Optional[Any] = ...,\n kind: Optional[Any] = ...,\n indexed: Optional[Any] = ...,\n repeated: Optional[Any] = ...,\n required: Optional[Any] = ...,\n default: Optional[Any] = ...,\n choices: Optional[Any] = ...,\n validator: Optional[Any] = ...,\n verbose_name: Optional[Any] = ...,\n write_empty_list: Optional[Any] = ...,\n ) -> None: ...\n\nclass BlobKeyProperty(Property): ...\n\nclass DateTimeProperty(Property):\n def __init__(\n self,\n name: Optional[Any] = ...,\n auto_now: Optional[Any] = ...,\n auto_now_add: Optional[Any] = ...,\n tzinfo: Optional[Any] = ...,\n indexed: Optional[Any] = ...,\n repeated: Optional[Any] = ...,\n required: Optional[Any] = ...,\n default: Optional[Any] = ...,\n choices: Optional[Any] = ...,\n validator: Optional[Any] = ...,\n verbose_name: Optional[Any] = ...,\n write_empty_list: Optional[Any] = ...,\n ) -> None: ...\n\nclass DateProperty(DateTimeProperty): ...\nclass TimeProperty(DateTimeProperty): ...\n\nclass StructuredProperty(Property):\n def __init__(\n self, model_class: Any, name: Optional[Any] = ..., **kwargs: Any\n ) -> None: ...\n def __getattr__(self, attrname: Any): ...\n IN: Any = ...\n\nclass LocalStructuredProperty(BlobProperty):\n def __init__(self, model_class: Any, **kwargs: Any) -> None: ...\n\nclass GenericProperty(Property):\n def __init__(\n self, name: Optional[Any] = ..., compressed: bool = ..., **kwargs: Any\n ) -> None: ...\n\nclass ComputedProperty(GenericProperty):\n def __init__(\n self,\n func: Any,\n name: Optional[Any] = ...,\n indexed: Optional[Any] = ...,\n repeated: Optional[Any] = ...,\n verbose_name: Optional[Any] = ...,\n ) -> None: ...\n\nclass MetaModel(type):\n def __init__(cls, name: Any, bases: Any, classdict: Any) -> None: ...\n\nclass Model(_NotEqualMixin, metaclass=MetaModel):\n key: Any = ...\n def 
__init__(_self: Any, **kwargs: Any) -> None: ...\n def __hash__(self) -> Any: ...\n def __eq__(self, other: Any) -> Any: ...\n def __lt__(self, value: Any) -> Any: ...\n def __le__(self, value: Any) -> Any: ...\n def __gt__(self, value: Any) -> Any: ...\n def __ge__(self, value: Any) -> Any: ...\n gql: Any = ...\n put: Any = ...\n put_async: Any = ...\n query: Any = ...\n allocate_ids: Any = ...\n allocate_ids_async: Any = ...\n get_by_id: Any = ...\n get_by_id_async: Any = ...\n get_or_insert: Any = ...\n get_or_insert_async: Any = ...\n populate: Any = ...\n has_complete_key: Any = ...\n to_dict: Any = ...\n\nclass Expando(Model):\n def __getattr__(self, name: Any): ...\n def __setattr__(self, name: Any, value: Any): ...\n def __delattr__(self, name: Any): ...\n\ndef get_multi_async(\n keys: Any,\n read_consistency: Optional[Any] = ...,\n read_policy: Optional[Any] = ...,\n transaction: Optional[Any] = ...,\n retries: Optional[Any] = ...,\n timeout: Optional[Any] = ...,\n deadline: Optional[Any] = ...,\n use_cache: Optional[Any] = ...,\n use_global_cache: Optional[Any] = ...,\n global_cache_timeout: Optional[Any] = ...,\n use_datastore: Optional[Any] = ...,\n use_memcache: Optional[Any] = ...,\n memcache_timeout: Optional[Any] = ...,\n max_memcache_items: Optional[Any] = ...,\n force_writes: Optional[Any] = ...,\n _options: Optional[Any] = ...,\n): ...\ndef get_multi(\n keys: Any,\n read_consistency: Optional[Any] = ...,\n read_policy: Optional[Any] = ...,\n transaction: Optional[Any] = ...,\n retries: Optional[Any] = ...,\n timeout: Optional[Any] = ...,\n deadline: Optional[Any] = ...,\n use_cache: Optional[Any] = ...,\n use_global_cache: Optional[Any] = ...,\n global_cache_timeout: Optional[Any] = ...,\n use_datastore: Optional[Any] = ...,\n use_memcache: Optional[Any] = ...,\n memcache_timeout: Optional[Any] = ...,\n max_memcache_items: Optional[Any] = ...,\n force_writes: Optional[Any] = ...,\n _options: Optional[Any] = ...,\n): ...\ndef put_multi_async(\n entities: Any,\n retries: Optional[Any] = ...,\n timeout: Optional[Any] = ...,\n deadline: Optional[Any] = ...,\n use_cache: Optional[Any] = ...,\n use_global_cache: Optional[Any] = ...,\n global_cache_timeout: Optional[Any] = ...,\n use_datastore: Optional[Any] = ...,\n use_memcache: Optional[Any] = ...,\n memcache_timeout: Optional[Any] = ...,\n max_memcache_items: Optional[Any] = ...,\n force_writes: Optional[Any] = ...,\n _options: Optional[Any] = ...,\n): ...\ndef put_multi(\n entities: Any,\n retries: Optional[Any] = ...,\n timeout: Optional[Any] = ...,\n deadline: Optional[Any] = ...,\n use_cache: Optional[Any] = ...,\n use_global_cache: Optional[Any] = ...,\n global_cache_timeout: Optional[Any] = ...,\n use_datastore: Optional[Any] = ...,\n use_memcache: Optional[Any] = ...,\n memcache_timeout: Optional[Any] = ...,\n max_memcache_items: Optional[Any] = ...,\n force_writes: Optional[Any] = ...,\n _options: Optional[Any] = ...,\n): ...\ndef delete_multi_async(\n keys: Any,\n retries: Optional[Any] = ...,\n timeout: Optional[Any] = ...,\n deadline: Optional[Any] = ...,\n use_cache: Optional[Any] = ...,\n use_global_cache: Optional[Any] = ...,\n global_cache_timeout: Optional[Any] = ...,\n use_datastore: Optional[Any] = ...,\n use_memcache: Optional[Any] = ...,\n memcache_timeout: Optional[Any] = ...,\n max_memcache_items: Optional[Any] = ...,\n force_writes: Optional[Any] = ...,\n _options: Optional[Any] = ...,\n): ...\ndef delete_multi(\n keys: Any,\n retries: Optional[Any] = ...,\n timeout: Optional[Any] = ...,\n deadline: 
Optional[Any] = ...,\n use_cache: Optional[Any] = ...,\n use_global_cache: Optional[Any] = ...,\n global_cache_timeout: Optional[Any] = ...,\n use_datastore: Optional[Any] = ...,\n use_memcache: Optional[Any] = ...,\n memcache_timeout: Optional[Any] = ...,\n max_memcache_items: Optional[Any] = ...,\n force_writes: Optional[Any] = ...,\n _options: Optional[Any] = ...,\n): ...\ndef get_indexes_async(**options: Any) -> None: ...\ndef get_indexes(**options: Any) -> None: ...\ndef _entity_to_protobuf(entity: Any): ...\n" }, { "alpha_fraction": 0.6066790223121643, "alphanum_fraction": 0.6085343360900879, "avg_line_length": 32.6875, "blob_id": "254db163aa06d5d5952f4e62e1dc76ba196406f4", "content_id": "d2643cc512b51c54a4791e378b985ace04206e09", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 539, "license_type": "permissive", "max_line_length": 84, "num_lines": 16, "path": "/stubs/google/cloud/datastore/transaction.pyi", "repo_name": "ZachOrr/google-cloud-datastore-stub", "src_encoding": "UTF-8", "text": "from google.cloud.datastore.batch import Batch as Batch\nfrom google.cloud.datastore_v1.types import TransactionOptions as TransactionOptions\nfrom typing import Any\n\nclass Transaction(Batch):\n _status: Any = ...\n _id: Any = ...\n _options: Any = ...\n def __init__(self, client: Any, read_only: bool = ...) -> None: ...\n @property\n def id(self): ...\n def current(self): ...\n def begin(self) -> None: ...\n def rollback(self) -> None: ...\n def commit(self) -> None: ...\n def put(self, entity: Any) -> None: ...\n" }, { "alpha_fraction": 0.6086434721946716, "alphanum_fraction": 0.6086434721946716, "avg_line_length": 36.8636360168457, "blob_id": "b6dd3788bf14b587dd1940364e619cfff472618d", "content_id": "4659650879fa210b5d0b885b17d633ac1367a43f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 833, "license_type": "permissive", "max_line_length": 76, "num_lines": 22, "path": "/stubs/google/cloud/datastore_v1/proto/datastore_pb2_grpc.pyi", "repo_name": "ZachOrr/google-cloud-datastore-stub", "src_encoding": "UTF-8", "text": "from typing import Any\n\nclass DatastoreStub:\n Lookup: Any = ...\n RunQuery: Any = ...\n BeginTransaction: Any = ...\n Commit: Any = ...\n Rollback: Any = ...\n AllocateIds: Any = ...\n ReserveIds: Any = ...\n def __init__(self, channel: Any) -> None: ...\n\nclass DatastoreServicer:\n def Lookup(self, request: Any, context: Any) -> None: ...\n def RunQuery(self, request: Any, context: Any) -> None: ...\n def BeginTransaction(self, request: Any, context: Any) -> None: ...\n def Commit(self, request: Any, context: Any) -> None: ...\n def Rollback(self, request: Any, context: Any) -> None: ...\n def AllocateIds(self, request: Any, context: Any) -> None: ...\n def ReserveIds(self, request: Any, context: Any) -> None: ...\n\ndef add_DatastoreServicer_to_server(servicer: Any, server: Any) -> None: ...\n" }, { "alpha_fraction": 0.6133891344070435, "alphanum_fraction": 0.6147838234901428, "avg_line_length": 38.180328369140625, "blob_id": "048abe9192f463cfe821e1279a15da27b3772fe4", "content_id": "ce2c2f7391aa2d0c610525126a27670220554260", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7170, "license_type": "permissive", "max_line_length": 89, "num_lines": 183, "path": "/InMemoryCloudDatastoreStub/datastore_stub.py", "repo_name": "ZachOrr/google-cloud-datastore-stub", "src_encoding": "UTF-8", 
"text": "import google.cloud.datastore.helpers as ds_helpers\nfrom google.cloud import ndb\nfrom google.cloud.datastore_v1 import types\nfrom google.cloud.datastore_v1.proto import datastore_pb2_grpc\nfrom typing import List\n\nfrom ._in_memory_store import _InMemoryStore\nfrom ._request_wrapper import _RequestWrapper\nfrom ._stored_object import _StoredObject\nfrom ._transactions import _TransactionType\n\n\nclass LocalDatastoreStub(datastore_pb2_grpc.DatastoreStub):\n\n _OPERATOR_TO_CMP_METHOD_NAME = {\n types.PropertyFilter.Operator.LESS_THAN: \"__lt__\",\n types.PropertyFilter.Operator.LESS_THAN_OR_EQUAL: \"__le__\",\n types.PropertyFilter.Operator.GREATER_THAN: \"__gt__\",\n types.PropertyFilter.Operator.GREATER_THAN_OR_EQUAL: \"__ge__\",\n types.PropertyFilter.Operator.EQUAL: \"__eq__\",\n # TODO missing HAS_ANCESTOR\n }\n\n store: _InMemoryStore\n\n Lookup: _RequestWrapper\n Commit: _RequestWrapper\n RunQuery: _RequestWrapper\n BeginTransaction: _RequestWrapper\n Rollback: _RequestWrapper\n # AllocateIds: _RequestWrapper\n # ReserveIds: _RequestWrapper\n\n def __init__(self) -> None:\n self.store = _InMemoryStore()\n\n self.Lookup = _RequestWrapper(self._lookup)\n self.Commit = _RequestWrapper(self._commit)\n self.RunQuery = _RequestWrapper(self._run_query)\n self.BeginTransaction = _RequestWrapper(self._begin_transaction)\n self.Rollback = _RequestWrapper(self._rollback)\n\n def _insert_model(self, model: ndb.Model) -> None:\n ds_key = model.key._key.to_protobuf()\n assert self.store.get(ds_key, None) is None\n entity_proto = ndb.model._entity_to_protobuf(model)\n self.store.put(entity_proto, 0, None)\n\n def _lookup(\n self, request: types.LookupRequest, *args, **kwargs\n ) -> types.LookupResponse:\n found: List[types.EntityResult] = []\n missing: List[types.EntityResult] = []\n transaction_id = request.read_options.transaction\n\n for key in request.keys:\n stored_data = self.store.get(key, transaction_id)\n if stored_data:\n found.append(\n types.EntityResult(\n entity=stored_data.entity, version=stored_data.version\n )\n )\n else:\n missing.append(\n types.EntityResult(\n entity=types.Entity(key=key),\n version=self.store.seqid(transaction_id),\n )\n )\n\n return types.LookupResponse(found=found, missing=missing,)\n\n def _begin_transaction(\n self, request: types.BeginTransactionRequest, *args, **kwargs\n ) -> types.BeginTransactionResponse:\n transaction_mode = None\n request_type = request.transaction_options.WhichOneof(\"mode\")\n if request_type == \"read_write\":\n transaction_mode = _TransactionType.READ_WRITE\n elif request_type == \"read_only\":\n transaction_mode = _TransactionType.READ_ONLY\n assert transaction_mode is not None\n transaction_id = self.store.beginTransaction(transaction_mode)\n\n return types.BeginTransactionResponse(transaction=transaction_id,)\n\n def _commit(\n self, request: types.CommitRequest, *args, **kwargs\n ) -> types.CommitResponse:\n results: List[types.MutationResult] = self.store.commitTransaction(\n request.transaction, request.mutations\n )\n return types.CommitResponse(mutation_results=results, index_updates=0,)\n\n def _rollback(\n self, request: types.RollbackRequest, *args, **kwargs\n ) -> types.RollbackResponse:\n self.store.rollbackTransaction(request.transaction)\n return types.RollbackResponse()\n\n def _run_query(\n self, request: types.RunQueryRequest, *args, **kwargs\n ) -> types.RunQueryResponse:\n # Don't support cloud sql\n # TODO also figire out error handling\n assert request.query\n\n # Query processing will be very 
naive.\n query: types.Query = request.query\n transaction_id: bytes = request.read_options.transaction\n resp_data: List[_StoredObject] = []\n for _, stored in self.store.items(transaction_id):\n if query.kind and stored.entity.key.path[-1].kind != query.kind[0].name:\n # this doesn't account for ancestor keys\n continue\n\n if self._matches_filter(stored, query.filter):\n resp_data.append(stored)\n\n if query.order:\n # TODO\n assert len(query.order) == 1\n order = query.order[0]\n assert order.direction in [\n types.PropertyOrder.Direction.DESCENDING,\n types.PropertyOrder.Direction.ASCENDING,\n ]\n resp_data.sort(\n key=lambda d: ds_helpers._get_value_from_value_pb(\n d.entity.properties.get(order.property.name)\n ),\n reverse=order.direction == types.PropertyOrder.Direction.DESCENDING,\n )\n\n if query.limit:\n resp_data = resp_data[: query.limit.value]\n\n return types.RunQueryResponse(\n batch=types.QueryResultBatch(\n entity_result_type=types.EntityResult.ResultType.FULL, # TODO projection\n entity_results=[\n types.EntityResult(entity=resp.entity, version=resp.version,)\n for resp in resp_data\n ],\n snapshot_version=self.store.seqid(transaction_id),\n )\n )\n\n def _matches_filter(\n self, stored_obj: _StoredObject, query_filter: types.Filter\n ) -> bool:\n filter_type = query_filter.WhichOneof(\"filter_type\")\n assert filter_type in [\"property_filter\", \"composite_filter\"]\n if filter_type == \"property_filter\":\n return self._matches_property_filter(\n stored_obj, query_filter.property_filter\n )\n elif filter_type == \"composite_filter\":\n return self._matches_composite_filter(\n stored_obj, query_filter.composite_filter\n )\n return False\n\n def _matches_composite_filter(\n self, stored_obj: _StoredObject, comp_filter: types.CompositeFilter\n ) -> bool:\n assert comp_filter.op == types.CompositeFilter.Operator.AND\n results = [self._matches_filter(stored_obj, f) for f in comp_filter.filters]\n return all(results)\n\n def _matches_property_filter(\n self, stored_obj: _StoredObject, prop_filter: types.PropertyFilter\n ) -> bool:\n for prop_name, prop_val_pb in stored_obj.entity.properties.items():\n prop_val = ds_helpers._get_value_from_value_pb(prop_val_pb)\n if prop_name == prop_filter.property.name:\n op = prop_filter.op\n filter_val = ds_helpers._get_value_from_value_pb(prop_filter.value)\n method_name = self._OPERATOR_TO_CMP_METHOD_NAME.get(op)\n assert method_name\n return getattr(prop_val, method_name)(filter_val)\n return False\n" }, { "alpha_fraction": 0.7777777910232544, "alphanum_fraction": 0.7777777910232544, "avg_line_length": 11, "blob_id": "be8396c33f43a3d1a59c83aae41e831890dfe1d8", "content_id": "bf021bb42f3e1be7189cbf58e4f01ff8c8c7e063", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 36, "license_type": "permissive", "max_line_length": 22, "num_lines": 3, "path": "/stubs/google/cloud/datastore_v1/gapic/datastore_client_config.pyi", "repo_name": "ZachOrr/google-cloud-datastore-stub", "src_encoding": "UTF-8", "text": "from typing import Any\n\nconfig: Any\n" }, { "alpha_fraction": 0.6895161271095276, "alphanum_fraction": 0.6895161271095276, "avg_line_length": 26.55555534362793, "blob_id": "b4ce0765635dc565e2ba4e2de55f5f431fe98079", "content_id": "7cd0f788a54a1c278fb5c0021eb97d29f45c3070", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 248, "license_type": "permissive", "max_line_length": 55, "num_lines": 9, "path": 
"/stubs/google/cloud/ndb/_retry.pyi", "repo_name": "ZachOrr/google-cloud-datastore-stub", "src_encoding": "UTF-8", "text": "from google.cloud.ndb import tasklets as tasklets\nfrom typing import Any\n\ndef wraps_safely(obj: Any, attr_names: Any = ...): ...\ndef retry_async(callback: Any, retries: Any = ...): ...\n\nTRANSIENT_CODES: Any\n\ndef is_transient_error(error: Any): ...\n" }, { "alpha_fraction": 0.7872340679168701, "alphanum_fraction": 0.7872340679168701, "avg_line_length": 46, "blob_id": "d0730bb6ff645727d77c2473ce143c09e8113649", "content_id": "3fd818c5996616f3a13cc6e8b3307608027c085a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 47, "license_type": "permissive", "max_line_length": 46, "num_lines": 1, "path": "/README.md", "repo_name": "ZachOrr/google-cloud-datastore-stub", "src_encoding": "UTF-8", "text": "# An In-Memory Stub for Google Cloud Datastore\n" }, { "alpha_fraction": 0.5685028433799744, "alphanum_fraction": 0.5685028433799744, "avg_line_length": 26.230770111083984, "blob_id": "ad091ff833855a26bc942349e690b73b20017846", "content_id": "7777deb2e85d2f57c104ac86ee27010b8889ba87", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1416, "license_type": "permissive", "max_line_length": 48, "num_lines": 52, "path": "/stubs/google/cloud/datastore_v1/gapic/enums.pyi", "repo_name": "ZachOrr/google-cloud-datastore-stub", "src_encoding": "UTF-8", "text": "import enum\n\nclass NullValue(enum.IntEnum):\n NULL_VALUE: int = ...\n\nclass CommitRequest:\n class Mode(enum.IntEnum):\n MODE_UNSPECIFIED: int = ...\n TRANSACTIONAL: int = ...\n NON_TRANSACTIONAL: int = ...\n\nclass CompositeFilter:\n class Operator(enum.IntEnum):\n OPERATOR_UNSPECIFIED: int = ...\n AND: int = ...\n\nclass EntityResult:\n class ResultType(enum.IntEnum):\n RESULT_TYPE_UNSPECIFIED: int = ...\n FULL: int = ...\n PROJECTION: int = ...\n KEY_ONLY: int = ...\n\nclass PropertyFilter:\n class Operator(enum.IntEnum):\n OPERATOR_UNSPECIFIED: int = ...\n LESS_THAN: int = ...\n LESS_THAN_OR_EQUAL: int = ...\n GREATER_THAN: int = ...\n GREATER_THAN_OR_EQUAL: int = ...\n EQUAL: int = ...\n HAS_ANCESTOR: int = ...\n\nclass PropertyOrder:\n class Direction(enum.IntEnum):\n DIRECTION_UNSPECIFIED: int = ...\n ASCENDING: int = ...\n DESCENDING: int = ...\n\nclass QueryResultBatch:\n class MoreResultsType(enum.IntEnum):\n MORE_RESULTS_TYPE_UNSPECIFIED: int = ...\n NOT_FINISHED: int = ...\n MORE_RESULTS_AFTER_LIMIT: int = ...\n MORE_RESULTS_AFTER_CURSOR: int = ...\n NO_MORE_RESULTS: int = ...\n\nclass ReadOptions:\n class ReadConsistency(enum.IntEnum):\n READ_CONSISTENCY_UNSPECIFIED: int = ...\n STRONG: int = ...\n EVENTUAL: int = ...\n" }, { "alpha_fraction": 0.7260450124740601, "alphanum_fraction": 0.735048234462738, "avg_line_length": 18.935897827148438, "blob_id": "bfddee63c91585514d50c4bbdeb6b1464dd23d50", "content_id": "25c996fe5f0a48d4b9c79fd9ccbb602168de0a04", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1555, "license_type": "permissive", "max_line_length": 59, "num_lines": 78, "path": "/stubs/google/cloud/datastore_v1/types.pyi", "repo_name": "ZachOrr/google-cloud-datastore-stub", "src_encoding": "UTF-8", "text": "from google.cloud.datastore_v1.proto.datastore_pb2 import *\nfrom google.cloud.datastore_v1.proto.entity_pb2 import *\nfrom google.cloud.datastore_v1.proto.query_pb2 import *\n\n# tNames in __all__ with no 
definition:\n# AllocateIdsRequest\n# AllocateIdsResponse\n# ArrayValue\n# BeginTransactionRequest\n# BeginTransactionResponse\n# BoolValue\n# BytesValue\n# CommitRequest\n# CommitResponse\n# CompositeFilter\n# CustomHttpPattern\n# DescriptorProto\n# DoubleValue\n# Entity\n# EntityResult\n# EnumDescriptorProto\n# EnumOptions\n# EnumValueDescriptorProto\n# EnumValueOptions\n# ExtensionRangeOptions\n# FieldDescriptorProto\n# FieldOptions\n# FileDescriptorProto\n# FileDescriptorSet\n# FileOptions\n# Filter\n# FloatValue\n# GeneratedCodeInfo\n# GqlQuery\n# GqlQueryParameter\n# Http\n# HttpRule\n# Int32Value\n# Int64Value\n# Key\n# KindExpression\n# LatLng\n# ListValue\n# LookupRequest\n# LookupResponse\n# MessageOptions\n# MethodDescriptorProto\n# MethodOptions\n# Mutation\n# MutationResult\n# OneofDescriptorProto\n# OneofOptions\n# PartitionId\n# Projection\n# PropertyFilter\n# PropertyOrder\n# PropertyReference\n# Query\n# QueryResultBatch\n# ReadOptions\n# ReserveIdsRequest\n# ReserveIdsResponse\n# RollbackRequest\n# RollbackResponse\n# RunQueryRequest\n# RunQueryResponse\n# ServiceDescriptorProto\n# ServiceOptions\n# SourceCodeInfo\n# StringValue\n# Struct\n# Timestamp\n# TransactionOptions\n# UInt32Value\n# UInt64Value\n# UninterpretedOption\n# Value\n# Value\n" }, { "alpha_fraction": 0.5972822904586792, "alphanum_fraction": 0.5972822904586792, "avg_line_length": 28.981481552124023, "blob_id": "2954c2d97b44f3a32a2db95450dc604921d27def", "content_id": "62c9ee84c3a8169310536bb834125bfb4c07e480", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1619, "license_type": "permissive", "max_line_length": 77, "num_lines": 54, "path": "/stubs/google/cloud/ndb/metadata.pyi", "repo_name": "ZachOrr/google-cloud-datastore-stub", "src_encoding": "UTF-8", "text": "from google.cloud.ndb import model\nfrom typing import Any, Optional\n\nclass _BaseMetadata(model.Model):\n KIND_NAME: str = ...\n def __new__(cls, *args: Any, **kwargs: Any): ...\n\nclass Namespace(_BaseMetadata):\n KIND_NAME: str = ...\n EMPTY_NAMESPACE_ID: int = ...\n @property\n def namespace_name(self): ...\n @classmethod\n def key_for_namespace(cls, namespace: Any): ...\n @classmethod\n def key_to_namespace(cls, key: Any): ...\n\nclass Kind(_BaseMetadata):\n KIND_NAME: str = ...\n @property\n def kind_name(self): ...\n @classmethod\n def key_for_kind(cls, kind: Any): ...\n @classmethod\n def key_to_kind(cls, key: Any): ...\n\nclass Property(_BaseMetadata):\n KIND_NAME: str = ...\n @property\n def property_name(self): ...\n @property\n def kind_name(self): ...\n property_representation: Any = ...\n @classmethod\n def key_for_kind(cls, kind: Any): ...\n @classmethod\n def key_for_property(cls, kind: Any, property: Any): ...\n @classmethod\n def key_to_kind(cls, key: Any): ...\n @classmethod\n def key_to_property(cls, key: Any): ...\n\nclass EntityGroup:\n def __new__(self, *args: Any, **kwargs: Any) -> None: ...\n\ndef get_entity_group_version(*args: Any, **kwargs: Any) -> None: ...\ndef get_kinds(start: Optional[Any] = ..., end: Optional[Any] = ...): ...\ndef get_namespaces(start: Optional[Any] = ..., end: Optional[Any] = ...): ...\ndef get_properties_of_kind(\n kind: Any, start: Optional[Any] = ..., end: Optional[Any] = ...\n): ...\ndef get_representations_of_kind(\n kind: Any, start: Optional[Any] = ..., end: Optional[Any] = ...\n): ...\n" }, { "alpha_fraction": 0.6575219631195068, "alphanum_fraction": 0.674376904964447, "avg_line_length": 31.80588150024414, "blob_id": 
"4e9cceea524752cf768ad4b8aeffa45c32ff234c", "content_id": "769ee5d42a1735269b5d705e038277f9d082db51", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5577, "license_type": "permissive", "max_line_length": 84, "num_lines": 170, "path": "/tests/query_test.py", "repo_name": "ZachOrr/google-cloud-datastore-stub", "src_encoding": "UTF-8", "text": "from InMemoryCloudDatastoreStub import datastore_stub\nfrom tests.models import SimpleModel\n\n\ndef test_get_nonexistent_key() -> None:\n model = SimpleModel.get_by_id(\"notfound\")\n assert model is None\n\n\ndef test_get_existing_by_id(ndb_stub: datastore_stub.LocalDatastoreStub) -> None:\n model = SimpleModel(id=\"test\", str_prop=\"asdf\",)\n ndb_stub._insert_model(model)\n\n model_res = SimpleModel.get_by_id(\"test\")\n assert model_res == model\n\n\ndef test_get_existing_by_field(ndb_stub: datastore_stub.LocalDatastoreStub) -> None:\n model = SimpleModel(id=\"test\", str_prop=\"asdf\",)\n ndb_stub._insert_model(model)\n\n query_res = SimpleModel.query(SimpleModel.str_prop == \"asdf\").get()\n assert query_res == model\n\n\ndef test_get_existing_by_multi_field(\n ndb_stub: datastore_stub.LocalDatastoreStub,\n) -> None:\n model = SimpleModel(id=\"test\", str_prop=\"asdf\", int_prop=42)\n ndb_stub._insert_model(model)\n\n query_res = SimpleModel.query(\n SimpleModel.str_prop == \"asdf\", SimpleModel.int_prop == 42\n ).get()\n assert query_res == model\n\n\ndef test_get_existing_by_field_not_found(\n ndb_stub: datastore_stub.LocalDatastoreStub,\n) -> None:\n model = SimpleModel(id=\"test\", str_prop=\"asdf\",)\n ndb_stub._insert_model(model)\n\n query_res = SimpleModel.query(SimpleModel.str_prop == \"foo\").get()\n assert query_res is None\n\n\ndef test_fetch_existing_by_field_multiple(\n ndb_stub: datastore_stub.LocalDatastoreStub,\n) -> None:\n model1 = SimpleModel(id=\"test\", str_prop=\"asdf\",)\n model2 = SimpleModel(id=\"test2\", str_prop=\"asdf\",)\n ndb_stub._insert_model(model1)\n ndb_stub._insert_model(model2)\n\n # We don't pass an ordering here, so the order is not deterministic\n query_res = SimpleModel.query(SimpleModel.str_prop == \"asdf\").fetch(limit=2)\n assert len(query_res) == 2\n query_res.sort(key=lambda m: m.key.id())\n assert query_res == [model1, model2]\n\n\ndef test_fetch_existing_by_field_multiple_with_order(\n ndb_stub: datastore_stub.LocalDatastoreStub,\n) -> None:\n model1 = SimpleModel(id=\"test\", str_prop=\"asdf\", int_prop=10)\n model2 = SimpleModel(id=\"test2\", str_prop=\"asdf\", int_prop=20)\n ndb_stub._insert_model(model1)\n ndb_stub._insert_model(model2)\n\n # We don't pass an ordering here, so the order is not deterministic\n query_res = (\n SimpleModel.query(SimpleModel.str_prop == \"asdf\")\n .order(SimpleModel.int_prop)\n .fetch(limit=2)\n )\n assert len(query_res) == 2\n assert query_res == [model1, model2]\n\n\ndef test_fetch_existing_by_field_with_limit(\n ndb_stub: datastore_stub.LocalDatastoreStub,\n) -> None:\n model1 = SimpleModel(id=\"test\", str_prop=\"asdf\",)\n model2 = SimpleModel(id=\"test2\", str_prop=\"asdf\",)\n ndb_stub._insert_model(model1)\n ndb_stub._insert_model(model2)\n\n # Since we don't pass an order by, it's non deterministic which we get\n query_res = SimpleModel.query(SimpleModel.str_prop == \"asdf\").fetch(limit=1)\n assert len(query_res) == 1\n assert query_res[0].str_prop == \"asdf\"\n\n\ndef test_fetch_existing_by_field_with_limit_not_hit(\n ndb_stub: datastore_stub.LocalDatastoreStub,\n) -> None:\n model1 = 
SimpleModel(id=\"test\", str_prop=\"asdf\",)\n model2 = SimpleModel(id=\"test2\", str_prop=\"asdfz\",)\n ndb_stub._insert_model(model1)\n ndb_stub._insert_model(model2)\n\n # Since we don't pass an order by, it's non deterministic which we get\n query_res = SimpleModel.query(SimpleModel.str_prop == \"asdf\").fetch(limit=2)\n assert len(query_res) == 1\n assert query_res[0].str_prop == \"asdf\"\n\n\ndef test_fetch_existing_by_gt(ndb_stub: datastore_stub.LocalDatastoreStub) -> None:\n model1 = SimpleModel(id=\"test\", int_prop=42,)\n model2 = SimpleModel(id=\"test2\", int_prop=43,)\n ndb_stub._insert_model(model1)\n ndb_stub._insert_model(model2)\n\n query_res = SimpleModel.query(SimpleModel.int_prop > 42).fetch(limit=5)\n assert query_res == [model2]\n\n\ndef test_fetch_existing_by_ge(ndb_stub: datastore_stub.LocalDatastoreStub) -> None:\n model1 = SimpleModel(id=\"test\", int_prop=42,)\n model2 = SimpleModel(id=\"test2\", int_prop=43,)\n ndb_stub._insert_model(model1)\n ndb_stub._insert_model(model2)\n\n query_res = SimpleModel.query(SimpleModel.int_prop >= 42).fetch(limit=5)\n assert len(query_res) == 2\n\n\ndef test_fetch_existing_by_lt(ndb_stub: datastore_stub.LocalDatastoreStub) -> None:\n model1 = SimpleModel(id=\"test\", int_prop=42,)\n model2 = SimpleModel(id=\"test2\", int_prop=43,)\n ndb_stub._insert_model(model1)\n ndb_stub._insert_model(model2)\n\n query_res = SimpleModel.query(SimpleModel.int_prop < 43).fetch(limit=5)\n assert query_res == [model1]\n\n\ndef test_fetch_existing_by_le(ndb_stub: datastore_stub.LocalDatastoreStub) -> None:\n model1 = SimpleModel(id=\"test\", int_prop=42,)\n model2 = SimpleModel(id=\"test2\", int_prop=43,)\n ndb_stub._insert_model(model1)\n ndb_stub._insert_model(model2)\n\n query_res = SimpleModel.query(SimpleModel.int_prop <= 43).fetch(limit=5)\n assert len(query_res) == 2\n\n\ndef test_put_model() -> None:\n model = SimpleModel(id=\"test\", str_prop=\"asdf\")\n key = model.put()\n\n assert key == model.key\n\n\ndef test_put_model_matches_key_get() -> None:\n model = SimpleModel(id=\"test\", str_prop=\"asdf\")\n key = model.put()\n\n get_resp = key.get()\n assert get_resp == model\n\n\ndef test_put_model_matches_point_query() -> None:\n model = SimpleModel(id=\"test\", str_prop=\"asdf\")\n key = model.put()\n\n get_resp = SimpleModel.get_by_id(\"test\")\n assert get_resp == model\n assert model.key == key\n" }, { "alpha_fraction": 0.619918704032898, "alphanum_fraction": 0.619918704032898, "avg_line_length": 29.75, "blob_id": "9cf39d8bdd02d192c36061129d21e857359b32ee", "content_id": "7c0c1722b81ea6d6939bb5cf0979946d75255f24", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1968, "license_type": "permissive", "max_line_length": 68, "num_lines": 64, "path": "/stubs/google/cloud/ndb/blobstore.pyi", "repo_name": "ZachOrr/google-cloud-datastore-stub", "src_encoding": "UTF-8", "text": "from google.cloud.ndb import model\nfrom typing import Any\n\nBlobKey: Any\nBLOB_INFO_KIND: str\nBLOB_MIGRATION_KIND: str\nBLOB_KEY_HEADER: str\nBLOB_RANGE_HEADER: str\nMAX_BLOB_FETCH_SIZE: int\nUPLOAD_INFO_CREATION_HEADER: str\nBlobKeyProperty = model.BlobKeyProperty\n\nclass BlobFetchSizeTooLargeError:\n def __init__(self, *args: Any, **kwargs: Any) -> None: ...\n\nclass BlobInfo:\n def __init__(self, *args: Any, **kwargs: Any) -> None: ...\n @classmethod\n def get(cls, *args: Any, **kwargs: Any) -> None: ...\n @classmethod\n def get_async(cls, *args: Any, **kwargs: Any) -> None: ...\n @classmethod\n def 
get_multi(cls, *args: Any, **kwargs: Any) -> None: ...\n @classmethod\n def get_multi_async(cls, *args: Any, **kwargs: Any) -> None: ...\n\nclass BlobInfoParseError:\n def __init__(self, *args: Any, **kwargs: Any) -> None: ...\n\nclass BlobNotFoundError:\n def __init__(self, *args: Any, **kwargs: Any) -> None: ...\n\nclass BlobReader:\n def __init__(self, *args: Any, **kwargs: Any) -> None: ...\n\ndef create_upload_url(*args: Any, **kwargs: Any) -> None: ...\ndef create_upload_url_async(*args: Any, **kwargs: Any) -> None: ...\n\nclass DataIndexOutOfRangeError:\n def __init__(self, *args: Any, **kwargs: Any) -> None: ...\n\ndef delete(*args: Any, **kwargs: Any) -> None: ...\ndef delete_async(*args: Any, **kwargs: Any) -> None: ...\ndef delete_multi(*args: Any, **kwargs: Any) -> None: ...\ndef delete_multi_async(*args: Any, **kwargs: Any) -> None: ...\n\nclass Error:\n def __init__(self, *args: Any, **kwargs: Any) -> None: ...\n\ndef fetch_data(*args: Any, **kwargs: Any) -> None: ...\ndef fetch_data_async(*args: Any, **kwargs: Any) -> None: ...\n\nget: Any\nget_async: Any\nget_multi: Any\nget_multi_async: Any\n\nclass InternalError:\n def __init__(self, *args: Any, **kwargs: Any) -> None: ...\n\ndef parse_blob_info(*args: Any, **kwargs: Any) -> None: ...\n\nclass PermissionDeniedError:\n def __init__(self, *args: Any, **kwargs: Any) -> None: ...\n" }, { "alpha_fraction": 0.6085657477378845, "alphanum_fraction": 0.6085657477378845, "avg_line_length": 30.375, "blob_id": "1d28abfcf7e86e93a385d962fe94cce08eb97521", "content_id": "53e7716e6f6ca962b9a06859d9daa270457b12d4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2008, "license_type": "permissive", "max_line_length": 84, "num_lines": 64, "path": "/stubs/google/cloud/ndb/_cache.pyi", "repo_name": "ZachOrr/google-cloud-datastore-stub", "src_encoding": "UTF-8", "text": "from UserDict import UserDict\nfrom google.cloud.ndb import tasklets as tasklets\nfrom typing import Any, Optional\n\nclass ContextCache(UserDict):\n def get_and_validate(self, key: Any): ...\n\nclass _GlobalCacheBatch:\n def full(self): ...\n def idle_callback(self) -> None: ...\n def done_callback(self, cache_call: Any) -> None: ...\n def make_call(self) -> None: ...\n def future_info(self, key: Any) -> None: ...\n\ndef global_get(key: Any): ...\n\nclass _GlobalCacheGetBatch(_GlobalCacheBatch):\n todo: Any = ...\n keys: Any = ...\n def __init__(self, ignore_options: Any) -> None: ...\n def add(self, key: Any): ...\n def done_callback(self, cache_call: Any) -> None: ...\n def make_call(self): ...\n def future_info(self, key: Any): ...\n\ndef global_set(key: Any, value: Any, expires: Optional[Any] = ...): ...\n\nclass _GlobalCacheSetBatch(_GlobalCacheBatch):\n expires: Any = ...\n todo: Any = ...\n futures: Any = ...\n def __init__(self, options: Any) -> None: ...\n def add(self, key: Any, value: Any): ...\n def make_call(self): ...\n def future_info(self, key: Any, value: Any): ...\n\ndef global_delete(key: Any): ...\n\nclass _GlobalCacheDeleteBatch(_GlobalCacheBatch):\n keys: Any = ...\n futures: Any = ...\n def __init__(self, ignore_options: Any) -> None: ...\n def add(self, key: Any): ...\n def make_call(self): ...\n def future_info(self, key: Any): ...\n\ndef global_watch(key: Any): ...\n\nclass _GlobalCacheWatchBatch(_GlobalCacheDeleteBatch):\n keys: Any = ...\n futures: Any = ...\n def __init__(self, ignore_options: Any) -> None: ...\n def make_call(self): ...\n def future_info(self, key: Any): ...\n\ndef 
global_compare_and_swap(key: Any, value: Any, expires: Optional[Any] = ...): ...\n\nclass _GlobalCacheCompareAndSwapBatch(_GlobalCacheSetBatch):\n def make_call(self): ...\n def future_info(self, key: Any, value: Any): ...\n\ndef global_lock(key: Any): ...\ndef is_locked_value(value: Any): ...\ndef global_cache_key(key: Any): ...\n" }, { "alpha_fraction": 0.5326923131942749, "alphanum_fraction": 0.5326923131942749, "avg_line_length": 22.636363983154297, "blob_id": "3619acd84a15311e99dcbe25ebdf603de8e8b73a", "content_id": "a82db1a8393e3a21647a228871f2e6f56ae6e4ef", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1040, "license_type": "permissive", "max_line_length": 75, "num_lines": 44, "path": "/stubs/google/cloud/ndb/_transaction.pyi", "repo_name": "ZachOrr/google-cloud-datastore-stub", "src_encoding": "UTF-8", "text": "from google.cloud.ndb import exceptions as exceptions, tasklets as tasklets\nfrom typing import Any, Optional\n\nlog: Any\n\ndef in_transaction(): ...\ndef transaction(\n callback: Any,\n retries: Any = ...,\n read_only: bool = ...,\n join: bool = ...,\n xg: bool = ...,\n propagation: Optional[Any] = ...,\n): ...\ndef transaction_async(\n callback: Any,\n retries: Any = ...,\n read_only: bool = ...,\n join: bool = ...,\n xg: bool = ...,\n propagation: Optional[Any] = ...,\n): ...\ndef transactional(\n retries: Any = ...,\n read_only: bool = ...,\n join: bool = ...,\n xg: bool = ...,\n propagation: Optional[Any] = ...,\n): ...\ndef transactional_async(\n retries: Any = ...,\n read_only: bool = ...,\n join: bool = ...,\n xg: bool = ...,\n propagation: Optional[Any] = ...,\n): ...\ndef transactional_tasklet(\n retries: Any = ...,\n read_only: bool = ...,\n join: bool = ...,\n xg: bool = ...,\n propagation: Optional[Any] = ...,\n): ...\ndef non_transactional(allow_existing: bool = ...): ...\n" }, { "alpha_fraction": 0.6792738437652588, "alphanum_fraction": 0.6913766860961914, "avg_line_length": 35.72222137451172, "blob_id": "eb8dc47ea1a4184fc72fc3ccd82734b2ba4ce6c8", "content_id": "6e2aaaae570bdd0ada0154a4f08d450a8ecc5f96", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 661, "license_type": "permissive", "max_line_length": 99, "num_lines": 18, "path": "/setup.py", "repo_name": "ZachOrr/google-cloud-datastore-stub", "src_encoding": "UTF-8", "text": "import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"InMemoryCloudDatastoreStub\",\n version=\"0.0.3\",\n description=\"An in-memory stub implementation of Google Cloud Datastore for use in unit tests\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/phil-lopreiato/google-cloud-datastore-stub/\",\n packages=setuptools.find_packages(),\n python_requires=\">=3\",\n install_requires=[\"google-cloud-ndb == 1.2.1\"],\n setup_requires=[\"pytest-runner\"],\n tests_require=[\"pytest\", \"black\", \"pyre-check\", \"flake8\", \"mypy\"],\n)\n" }, { "alpha_fraction": 0.6652906537055969, "alphanum_fraction": 0.6652906537055969, "avg_line_length": 27.78217887878418, "blob_id": "4c18519c1d25d99b3eeaaacb4d64d20de551aac5", "content_id": "135914f83be2a88a9cf53ddbb06951fef09816fb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2907, "license_type": "permissive", "max_line_length": 82, "num_lines": 101, "path": 
"/stubs/google/cloud/ndb/stats.pyi", "repo_name": "ZachOrr/google-cloud-datastore-stub", "src_encoding": "UTF-8", "text": "from google.cloud.ndb import model\nfrom typing import Any\n\nclass BaseStatistic(model.Model):\n STORED_KIND_NAME: str = ...\n bytes: Any = ...\n count: Any = ...\n timestamp: Any = ...\n\nclass BaseKindStatistic(BaseStatistic):\n STORED_KIND_NAME: str = ...\n kind_name: Any = ...\n entity_bytes: Any = ...\n\nclass GlobalStat(BaseStatistic):\n STORED_KIND_NAME: str = ...\n entity_bytes: Any = ...\n builtin_index_bytes: Any = ...\n builtin_index_count: Any = ...\n composite_index_bytes: Any = ...\n composite_index_count: Any = ...\n\nclass NamespaceStat(BaseStatistic):\n STORED_KIND_NAME: str = ...\n subject_namespace: Any = ...\n entity_bytes: Any = ...\n builtin_index_bytes: Any = ...\n builtin_index_count: Any = ...\n composite_index_bytes: Any = ...\n composite_index_count: Any = ...\n\nclass KindStat(BaseKindStatistic):\n STORED_KIND_NAME: str = ...\n builtin_index_bytes: Any = ...\n builtin_index_count: Any = ...\n composite_index_bytes: Any = ...\n composite_index_count: Any = ...\n\nclass KindRootEntityStat(BaseKindStatistic):\n STORED_KIND_NAME: str = ...\n\nclass KindNonRootEntityStat(BaseKindStatistic):\n STORED_KIND_NAME: str = ...\n\nclass PropertyTypeStat(BaseStatistic):\n STORED_KIND_NAME: str = ...\n property_type: Any = ...\n entity_bytes: Any = ...\n builtin_index_bytes: Any = ...\n builtin_index_count: Any = ...\n\nclass KindPropertyTypeStat(BaseKindStatistic):\n STORED_KIND_NAME: str = ...\n property_type: Any = ...\n builtin_index_bytes: Any = ...\n builtin_index_count: Any = ...\n\nclass KindPropertyNameStat(BaseKindStatistic):\n STORED_KIND_NAME: str = ...\n property_name: Any = ...\n builtin_index_bytes: Any = ...\n builtin_index_count: Any = ...\n\nclass KindPropertyNamePropertyTypeStat(BaseKindStatistic):\n STORED_KIND_NAME: str = ...\n property_type: Any = ...\n property_name: Any = ...\n builtin_index_bytes: Any = ...\n builtin_index_count: Any = ...\n\nclass KindCompositeIndexStat(BaseStatistic):\n STORED_KIND_NAME: str = ...\n index_id: Any = ...\n kind_name: Any = ...\n\nclass NamespaceGlobalStat(GlobalStat):\n STORED_KIND_NAME: str = ...\n\nclass NamespaceKindStat(KindStat):\n STORED_KIND_NAME: str = ...\n\nclass NamespaceKindRootEntityStat(KindRootEntityStat):\n STORED_KIND_NAME: str = ...\n\nclass NamespaceKindNonRootEntityStat(KindNonRootEntityStat):\n STORED_KIND_NAME: str = ...\n\nclass NamespacePropertyTypeStat(PropertyTypeStat):\n STORED_KIND_NAME: str = ...\n\nclass NamespaceKindPropertyTypeStat(KindPropertyTypeStat):\n STORED_KIND_NAME: str = ...\n\nclass NamespaceKindPropertyNameStat(KindPropertyNameStat):\n STORED_KIND_NAME: str = ...\n\nclass NamespaceKindPropertyNamePropertyTypeStat(KindPropertyNamePropertyTypeStat):\n STORED_KIND_NAME: str = ...\n\nclass NamespaceKindCompositeIndexStat(KindCompositeIndexStat):\n STORED_KIND_NAME: str = ...\n" }, { "alpha_fraction": 0.5445736646652222, "alphanum_fraction": 0.5445736646652222, "avg_line_length": 29.352941513061523, "blob_id": "bc823c49cfc490f93ff8347f4d37da9117f08ab2", "content_id": "e1548a5c0b0b5f116ad4e242bac4f3e82fdd62ea", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 516, "license_type": "permissive", "max_line_length": 80, "num_lines": 17, "path": "/stubs/google/cloud/datastore/entity.pyi", "repo_name": "ZachOrr/google-cloud-datastore-stub", "src_encoding": "UTF-8", "text": "from google.cloud._helpers 
import _ensure_tuple_or_list as _ensure_tuple_or_list\nfrom typing import Any, Optional\n\nclass Entity(dict):\n key: Any = ...\n exclude_from_indexes: Any = ...\n _meanings: Any = ...\n def __init__(\n self, key: Optional[Any] = ..., exclude_from_indexes: Any = ...\n ) -> None: ...\n def __eq__(self, other: Any) -> Any: ...\n def __ne__(self, other: Any) -> Any: ...\n @property\n def kind(self): ...\n @property\n def id(self): ...\n def __repr__(self): ...\n" }, { "alpha_fraction": 0.7238805890083313, "alphanum_fraction": 0.7238805890083313, "avg_line_length": 21.33333396911621, "blob_id": "0fcdc7a32f6458b50991f3f93e6c9e4fe28f27e0", "content_id": "a87777c42bb4e8c1f05b221b0271cd9bba9edfd9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 134, "license_type": "permissive", "max_line_length": 36, "num_lines": 6, "path": "/tests/models.py", "repo_name": "ZachOrr/google-cloud-datastore-stub", "src_encoding": "UTF-8", "text": "from google.cloud import ndb\n\n\nclass SimpleModel(ndb.Model):\n str_prop = ndb.StringProperty()\n int_prop = ndb.IntegerProperty()\n" }, { "alpha_fraction": 0.6573323607444763, "alphanum_fraction": 0.6639646291732788, "avg_line_length": 37.771427154541016, "blob_id": "cd86204c72dd96aa44287ed234aff4de41fa20d4", "content_id": "92725774d0c4500856fe97b4bb4eafbc85ac514b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1357, "license_type": "permissive", "max_line_length": 74, "num_lines": 35, "path": "/stubs/google/cloud/datastore/helpers.pyi", "repo_name": "ZachOrr/google-cloud-datastore-stub", "src_encoding": "UTF-8", "text": "from google.cloud._helpers import (\n _datetime_to_pb_timestamp as _datetime_to_pb_timestamp,\n _pb_timestamp_to_datetime as _pb_timestamp_to_datetime,\n)\nfrom google.cloud.datastore.entity import Entity as Entity\nfrom google.cloud.datastore.key import Key as Key\nfrom google.cloud.datastore_v1.proto import (\n datastore_pb2 as datastore_pb2,\n entity_pb2 as entity_pb2,\n)\nfrom google.protobuf import struct_pb2 as struct_pb2\nfrom google.type import latlng_pb2 as latlng_pb2\nfrom typing import Any\n\ndef _get_meaning(value_pb: Any, is_list: bool = ...): ...\ndef _new_value_pb(entity_pb: Any, name: Any): ...\ndef _property_tuples(entity_pb: Any): ...\ndef entity_from_protobuf(pb: Any): ...\ndef _set_pb_meaning_from_entity(\n entity: Any, name: Any, value: Any, value_pb: Any, is_list: bool = ...\n) -> None: ...\ndef entity_to_protobuf(entity: Any): ...\ndef get_read_options(eventual: Any, transaction_id: Any): ...\ndef key_from_protobuf(pb: Any): ...\ndef _pb_attr_value(val: Any): ...\ndef _get_value_from_value_pb(value_pb: Any): ...\ndef _set_protobuf_value(value_pb: Any, val: Any) -> None: ...\n\nclass GeoPoint:\n latitude: Any = ...\n longitude: Any = ...\n def __init__(self, latitude: Any, longitude: Any) -> None: ...\n def to_protobuf(self): ...\n def __eq__(self, other: Any) -> Any: ...\n def __ne__(self, other: Any) -> Any: ...\n" }, { "alpha_fraction": 0.6344969272613525, "alphanum_fraction": 0.6344969272613525, "avg_line_length": 27.647058486938477, "blob_id": "212bbb65482309eb91f1db2937a556054c39899e", "content_id": "8b2aa97fa08bf56c0da20e339598290c9fa4468e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 487, "license_type": "permissive", "max_line_length": 52, "num_lines": 17, "path": "/InMemoryCloudDatastoreStub/_request_wrapper.py", 
"repo_name": "ZachOrr/google-cloud-datastore-stub", "src_encoding": "UTF-8", "text": "import grpc\nfrom .futures import InstantFuture\n\n\nclass _RequestWrapper(grpc.UnaryUnaryMultiCallable):\n def __init__(self, func):\n self.func = func\n\n def __call__(self, request, *args, **kwargs):\n return self.func(request, *args, **kwargs)\n\n def with_call(self, request, *args, **kwargs):\n return self(request, *args, **kwargs)\n\n def future(self, request, *args, **kwargs):\n resp = self(request, *args, **kwargs)\n return InstantFuture(resp)\n" }, { "alpha_fraction": 0.806779682636261, "alphanum_fraction": 0.810169517993927, "avg_line_length": 48.16666793823242, "blob_id": "cbd91a234154e0cee2c96a8abd5abc6559e47108", "content_id": "16bbcf182ce8893133f0537c522139f87ba2ffcb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 295, "license_type": "permissive", "max_line_length": 80, "num_lines": 6, "path": "/stubs/google/cloud/datastore/_gapic.pyi", "repo_name": "ZachOrr/google-cloud-datastore-stub", "src_encoding": "UTF-8", "text": "from google.cloud._helpers import make_secure_channel as make_secure_channel\nfrom google.cloud._http import DEFAULT_USER_AGENT as DEFAULT_USER_AGENT\nfrom google.cloud.datastore_v1.gapic import datastore_client as datastore_client\nfrom typing import Any\n\ndef make_datastore_api(client: Any): ...\n" }, { "alpha_fraction": 0.5997375249862671, "alphanum_fraction": 0.5997375249862671, "avg_line_length": 29.479999542236328, "blob_id": "9abdc957a3390ad5393ac5e83ecd657bdcce3b2f", "content_id": "024baf22c728a64683a4dfd89051a0fed631032a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 762, "license_type": "permissive", "max_line_length": 57, "num_lines": 25, "path": "/stubs/google/cloud/ndb/utils.pyi", "repo_name": "ZachOrr/google-cloud-datastore-stub", "src_encoding": "UTF-8", "text": "import threading\nfrom typing import Any\n\ndef code_info(*args: Any, **kwargs: Any) -> None: ...\n\nDEBUG: bool\n\ndef decorator(*args: Any, **kwargs: Any) -> None: ...\ndef frame_info(*args: Any, **kwargs: Any) -> None: ...\ndef func_info(*args: Any, **kwargs: Any) -> None: ...\ndef gen_info(*args: Any, **kwargs: Any) -> None: ...\ndef get_stack(*args: Any, **kwargs: Any) -> None: ...\ndef logging_debug(*args: Any, **kwargs: Any) -> None: ...\n\nclass keyword_only:\n defaults: Any = ...\n def __init__(self, **kwargs: Any) -> None: ...\n def __call__(self, wrapped: Any): ...\n\ndef positional(max_pos_args: Any): ...\n\nthreading_local = threading.local\n\ndef tweak_logging(*args: Any, **kwargs: Any) -> None: ...\ndef wrapping(*args: Any, **kwargs: Any) -> None: ...\n" }, { "alpha_fraction": 0.5048654675483704, "alphanum_fraction": 0.5048654675483704, "avg_line_length": 34.65306091308594, "blob_id": "cd6b4a8c92a3093a9cbb47eb6bca28e5a1f94932", "content_id": "1b230198b805c11e01e915d36e2a9c30be7a68d3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3494, "license_type": "permissive", "max_line_length": 58, "num_lines": 98, "path": "/stubs/google/cloud/ndb/key.pyi", "repo_name": "ZachOrr/google-cloud-datastore-stub", "src_encoding": "UTF-8", "text": "from typing import Any, Optional\n\nUNDEFINED: Any\n\nclass Key:\n def __new__(cls, *path_args: Any, **kwargs: Any): ...\n def __hash__(self) -> Any: ...\n def __eq__(self, other: Any) -> Any: ...\n def __ne__(self, other: Any) -> Any: ...\n def __lt__(self, 
other: Any) -> Any: ...\n def __le__(self, other: Any) -> Any: ...\n def __gt__(self, other: Any) -> Any: ...\n def __ge__(self, other: Any) -> Any: ...\n def __getnewargs__(self): ...\n def parent(self): ...\n def root(self): ...\n def namespace(self): ...\n def project(self): ...\n app: Any = ...\n def id(self): ...\n def string_id(self): ...\n def integer_id(self): ...\n def pairs(self): ...\n def flat(self): ...\n def kind(self): ...\n def reference(self): ...\n def serialized(self): ...\n def urlsafe(self): ...\n def to_legacy_urlsafe(self, location_prefix: Any): ...\n def get(\n self,\n read_consistency: Optional[Any] = ...,\n read_policy: Optional[Any] = ...,\n transaction: Optional[Any] = ...,\n retries: Optional[Any] = ...,\n timeout: Optional[Any] = ...,\n deadline: Optional[Any] = ...,\n use_cache: Optional[Any] = ...,\n use_global_cache: Optional[Any] = ...,\n use_datastore: Optional[Any] = ...,\n global_cache_timeout: Optional[Any] = ...,\n use_memcache: Optional[Any] = ...,\n memcache_timeout: Optional[Any] = ...,\n max_memcache_items: Optional[Any] = ...,\n force_writes: Optional[Any] = ...,\n _options: Optional[Any] = ...,\n ): ...\n def get_async(\n self,\n read_consistency: Optional[Any] = ...,\n read_policy: Optional[Any] = ...,\n transaction: Optional[Any] = ...,\n retries: Optional[Any] = ...,\n timeout: Optional[Any] = ...,\n deadline: Optional[Any] = ...,\n use_cache: Optional[Any] = ...,\n use_global_cache: Optional[Any] = ...,\n use_datastore: Optional[Any] = ...,\n global_cache_timeout: Optional[Any] = ...,\n use_memcache: Optional[Any] = ...,\n memcache_timeout: Optional[Any] = ...,\n max_memcache_items: Optional[Any] = ...,\n force_writes: Optional[Any] = ...,\n _options: Optional[Any] = ...,\n ): ...\n def delete(\n self,\n retries: Optional[Any] = ...,\n timeout: Optional[Any] = ...,\n deadline: Optional[Any] = ...,\n use_cache: Optional[Any] = ...,\n use_global_cache: Optional[Any] = ...,\n use_datastore: Optional[Any] = ...,\n global_cache_timeout: Optional[Any] = ...,\n use_memcache: Optional[Any] = ...,\n memcache_timeout: Optional[Any] = ...,\n max_memcache_items: Optional[Any] = ...,\n force_writes: Optional[Any] = ...,\n _options: Optional[Any] = ...,\n ): ...\n def delete_async(\n self,\n retries: Optional[Any] = ...,\n timeout: Optional[Any] = ...,\n deadline: Optional[Any] = ...,\n use_cache: Optional[Any] = ...,\n use_global_cache: Optional[Any] = ...,\n use_datastore: Optional[Any] = ...,\n global_cache_timeout: Optional[Any] = ...,\n use_memcache: Optional[Any] = ...,\n memcache_timeout: Optional[Any] = ...,\n max_memcache_items: Optional[Any] = ...,\n force_writes: Optional[Any] = ...,\n _options: Optional[Any] = ...,\n ): ...\n @classmethod\n def from_old_key(cls, old_key: Any) -> None: ...\n def to_old_key(self) -> None: ...\n" }, { "alpha_fraction": 0.6324480772018433, "alphanum_fraction": 0.6343207359313965, "avg_line_length": 40.65957260131836, "blob_id": "a3d3686960d7a8e7460b5bd30ae71664ee13420e", "content_id": "f0559a7e3441a5fbe17aa2e8c8d7178aede3953b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5874, "license_type": "permissive", "max_line_length": 108, "num_lines": 141, "path": "/InMemoryCloudDatastoreStub/_in_memory_store.py", "repo_name": "ZachOrr/google-cloud-datastore-stub", "src_encoding": "UTF-8", "text": "import copy\nimport uuid\nfrom google.cloud.datastore_v1 import types\nfrom typing import Dict, Iterable, Optional, Tuple, List\nfrom ._stored_object import 
_StoredObject\nfrom ._transactions import _InFlightTransaction, _TransactionType\n\n\nclass _InMemoryStore(object):\n\n _seqid: int\n _store: Dict[str, _StoredObject]\n _transactions: Dict[bytes, _InFlightTransaction]\n\n def __init__(self) -> None:\n self._seqid = 0\n self._store = {}\n self._transactions = {}\n\n def seqid(self, transaction_id: Optional[bytes]) -> int:\n if transaction_id and transaction_id in self._transactions:\n return self._transactions[transaction_id].initial_seqid\n return self._seqid\n\n def put(\n self,\n ds_entity: types.Entity,\n entity_version: int,\n transaction_id: Optional[bytes],\n ) -> None:\n key_str = ds_entity.key.SerializeToString()\n if transaction_id and transaction_id in self._transactions:\n mutation = types.Mutation(upsert=ds_entity,)\n self._transactions[transaction_id].mutations.append(mutation)\n else:\n self._seqid += 1\n self._store[key_str] = _StoredObject(\n entity=ds_entity, version=entity_version\n )\n\n def get(\n self, key: types.Key, transaction_id: Optional[bytes]\n ) -> Optional[_StoredObject]:\n key_str = key.SerializeToString()\n if transaction_id and transaction_id in self._transactions:\n return self._transactions[transaction_id].snapshot.get(key_str)\n else:\n return self._store.get(key_str)\n\n def delete(self, key: types.Key, transaction_id: Optional[bytes]) -> None:\n key_str = key.SerializeToString()\n if transaction_id and transaction_id in self._transactions:\n mutation = types.Mutation(delete=key,)\n self._transactions[transaction_id].mutations.append(mutation)\n else:\n if key_str in self._store:\n del self._store[key_str]\n\n def items(self, transaction_id: bytes) -> Iterable[Tuple[str, _StoredObject]]:\n if transaction_id in self._transactions:\n return self._transactions[transaction_id].snapshot.items()\n else:\n return self._store.items()\n\n def beginTransaction(self, mode: _TransactionType) -> bytes:\n transaction_id = uuid.uuid1().bytes\n self._transactions[transaction_id] = _InFlightTransaction(\n mode=mode,\n initial_seqid=self._seqid,\n snapshot=copy.deepcopy(self._store),\n mutations=[],\n )\n return transaction_id\n\n def commitTransaction(\n self, transaction_id: bytes, final_mutations: List[types.Mutation]\n ) -> List[types.MutationResult]:\n if transaction_id != b\"\" and transaction_id in self._transactions:\n transaction = self._transactions[transaction_id]\n\n # Apply OCC (make sure no entities touched in the transaction have been modified since it began)\n # For now, just assume that no writes at all are allowed\n assert self._seqid == transaction.initial_seqid\n\n for mutation in transaction.mutations:\n self._applyMutation(mutation)\n\n del self._transactions[transaction_id]\n\n return [self._applyMutation(m) for m in final_mutations]\n\n def rollbackTransaction(self, transaction_id: bytes) -> None:\n assert transaction_id in self._transactions\n del self._transactions[transaction_id]\n\n def _applyMutation(self, mutation: types.Mutation) -> types.MutationResult:\n # TODO will need to potentially do key assignment for insert/upsert\n mutation_key = self._mutation_key(mutation)\n existing_data = self.get(mutation_key, None)\n operation = mutation.WhichOneof(\"operation\")\n if operation == \"insert\":\n # TODO figure out how to properly express error handling\n assert existing_data is None\n self.put(mutation.insert, 0, None)\n return types.MutationResult(key=mutation_key, version=0)\n elif operation == \"update\":\n # TODO figure out better error handling\n assert existing_data is not None\n if 
existing_data.version != mutation.base_version:\n                return self._mutation_conflict(mutation_key, existing_data.version)\n            new_version = existing_data.version + 1\n            self.put(mutation.update, new_version, None)\n            return types.MutationResult(key=mutation_key, version=new_version)\n        elif operation == \"upsert\":\n            if existing_data and existing_data.version != mutation.base_version:\n                return self._mutation_conflict(mutation_key, existing_data.version)\n            new_version = existing_data.version + 1 if existing_data else 0\n            self.put(mutation.upsert, new_version, None)\n            return types.MutationResult(key=mutation_key, version=new_version)\n        elif operation == \"delete\":\n            new_version = existing_data.version + 1 if existing_data else 0\n            self.delete(mutation_key, None)\n            return types.MutationResult(key=mutation_key, version=new_version)\n\n    def _mutation_key(self, mutation: types.Mutation) -> types.Key:\n        operation = mutation.WhichOneof(\"operation\")\n        if operation == \"insert\":\n            return mutation.insert.key\n        elif operation == \"update\":\n            return mutation.update.key\n        elif operation == \"upsert\":\n            return mutation.upsert.key\n        elif operation == \"delete\":\n            return mutation.delete.key\n\n    def _mutation_conflict(\n        self, key: types.Key, old_version: int\n    ) -> types.MutationResult:\n        return types.MutationResult(\n            key=key, version=old_version, conflict_detected=True,\n        )\n" }, { "alpha_fraction": 0.7904411554336548, "alphanum_fraction": 0.7904411554336548, "avg_line_length": 18.133333206176758, "blob_id": "f7857ed7c3fe81435ccbf2a4182513c586fe97e4", "content_id": "cd9fa514b1800336358cd0e4e2bac1de9241fb5b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 272, "license_type": "permissive", "max_line_length": 36, "num_lines": 15, "path": "/stubs/google/cloud/datastore_v1/proto/query_pb2.pyi", "repo_name": "ZachOrr/google-cloud-datastore-stub", "src_encoding": "UTF-8", "text": "from typing import Any\n\nDESCRIPTOR = Any\nEntityResult = Any\nQuery = Any\nKindExpression = Any\nPropertyReference = Any\nProjection = Any\nPropertyOrder = Any\nFilter = Any\nCompositeFilter = Any\nPropertyFilter = Any\nGqlQuery = Any\nGqlQueryParameter = Any\nQueryResultBatch = Any\n" }, { "alpha_fraction": 0.6102719306945801, "alphanum_fraction": 0.6102719306945801, "avg_line_length": 29.090909957885742, "blob_id": "c946aec2bd01f1adbf48835e61a843a61f5c2f2f", "content_id": "b413b9e9fbeeb5951d94ca9b91af89ce76345a18", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 331, "license_type": "permissive", "max_line_length": 59, "num_lines": 11, "path": "/stubs/google/cloud/ndb/_remote.pyi", "repo_name": "ZachOrr/google-cloud-datastore-stub", "src_encoding": "UTF-8", "text": "from google.cloud.ndb import exceptions as exceptions\nfrom typing import Any\n\nclass RemoteCall:\n    future: Any = ...\n    info: Any = ...\n    def __init__(self, future: Any, info: Any) -> None: ...\n    def exception(self): ...\n    def result(self): ...\n    def add_done_callback(self, callback: Any): ...\n    def cancel(self): ...\n" }, { "alpha_fraction": 0.5951817035675049, "alphanum_fraction": 0.5971564054489136, "avg_line_length": 29.878047943115234, "blob_id": "67eeb2483967bca2bf9c8f0e3f759a1a4fa07b3f", "content_id": "e9afe71e332851f913ae9b6cc762ba0f4771ede1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2532, "license_type": "permissive", "max_line_length": 88, "num_lines": 82, 
"path": "/stubs/google/cloud/ndb/_datastore_api.pyi", "repo_name": "ZachOrr/google-cloud-datastore-stub", "src_encoding": "UTF-8", "text": "from google.cloud.datastore import helpers as helpers\nfrom google.cloud.datastore_v1.proto import (\n datastore_pb2 as datastore_pb2,\n entity_pb2 as entity_pb2,\n)\nfrom google.cloud.ndb import tasklets as tasklets\nfrom typing import Any, Optional\n\nEVENTUAL: Any\nEVENTUAL_CONSISTENCY = EVENTUAL\nSTRONG: Any\nlog: Any\n\ndef stub(): ...\ndef make_call(\n rpc_name: Any,\n request: Any,\n retries: Optional[Any] = ...,\n timeout: Optional[Any] = ...,\n): ...\ndef lookup(key: Any, options: Any) -> None: ...\n\nclass _LookupBatch:\n options: Any = ...\n todo: Any = ...\n def __init__(self, options: Any) -> None: ...\n def full(self): ...\n def add(self, key: Any): ...\n def idle_callback(self) -> None: ...\n def lookup_callback(self, rpc: Any) -> None: ...\n\ndef get_read_options(options: Any, default_read_consistency: Optional[Any] = ...): ...\ndef put(entity: Any, options: Any) -> None: ...\ndef delete(key: Any, options: Any) -> None: ...\n\nclass _NonTransactionalCommitBatch:\n options: Any = ...\n mutations: Any = ...\n futures: Any = ...\n def __init__(self, options: Any) -> None: ...\n def full(self): ...\n def put(self, entity_pb: Any): ...\n def delete(self, key: Any): ...\n def idle_callback(self) -> None: ...\n\ndef commit(\n transaction: Any, retries: Optional[Any] = ..., timeout: Optional[Any] = ...\n): ...\n\nclass _TransactionalCommitBatch(_NonTransactionalCommitBatch):\n transaction: Any = ...\n allocating_ids: Any = ...\n incomplete_mutations: Any = ...\n incomplete_futures: Any = ...\n def __init__(self, transaction: Any, options: Any) -> None: ...\n def put(self, entity_pb: Any): ...\n def delete(self, key: Any): ...\n def idle_callback(self) -> None: ...\n def allocate_ids_callback(self, rpc: Any, mutations: Any, futures: Any) -> None: ...\n def commit(\n self, retries: Optional[Any] = ..., timeout: Optional[Any] = ...\n ) -> None: ...\n\ndef allocate(keys: Any, options: Any): ...\n\nclass _AllocateIdsBatch:\n options: Any = ...\n keys: Any = ...\n futures: Any = ...\n def __init__(self, options: Any) -> None: ...\n def full(self): ...\n def room_left(self): ...\n def add(self, keys: Any): ...\n def idle_callback(self) -> None: ...\n def allocate_ids_callback(self, rpc: Any) -> None: ...\n\ndef begin_transaction(\n read_only: Any, retries: Optional[Any] = ..., timeout: Optional[Any] = ...\n) -> None: ...\ndef rollback(\n transaction: Any, retries: Optional[Any] = ..., timeout: Optional[Any] = ...\n) -> None: ...\n" }, { "alpha_fraction": 0.6875, "alphanum_fraction": 0.6928191781044006, "avg_line_length": 25.85714340209961, "blob_id": "4c1879409166764bbaf3ded285576083e6082eed", "content_id": "b230cc71e9d8390ddee1bf3f03bea4794bcf9bc1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 752, "license_type": "permissive", "max_line_length": 58, "num_lines": 28, "path": "/tests/transaction_test.py", "repo_name": "ZachOrr/google-cloud-datastore-stub", "src_encoding": "UTF-8", "text": "from InMemoryCloudDatastoreStub import datastore_stub\nfrom tests.models import SimpleModel\n\n\ndef test_get_or_insert_existing_by_id(\n ndb_stub: datastore_stub.LocalDatastoreStub,\n) -> None:\n model = SimpleModel(id=\"test\", str_prop=\"asdf\",)\n ndb_stub._insert_model(model)\n\n model_res = SimpleModel.get_or_insert(\"test\")\n assert model_res == model\n\n\ndef 
test_get_or_insert_doesnt_exist() -> None:\n model_res = SimpleModel.get_or_insert(\"test\")\n assert model_res is not None\n\n\ndef test_get_or_insert_then_update() -> None:\n model = SimpleModel.get_or_insert(\"test\", int_prop=10)\n assert model is not None\n\n model.int_prop = 20\n model.put()\n\n sanity_check = SimpleModel.get_by_id(\"test\")\n assert sanity_check == model\n" }, { "alpha_fraction": 0.5880077481269836, "alphanum_fraction": 0.5880077481269836, "avg_line_length": 16.233333587646484, "blob_id": "76aeaf5ebf13bd58b3366ecffc635f24b3f7a120", "content_id": "92bbd17eea6b201866819118455d9f10586c8f48", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 517, "license_type": "permissive", "max_line_length": 38, "num_lines": 30, "path": "/InMemoryCloudDatastoreStub/futures.py", "repo_name": "ZachOrr/google-cloud-datastore-stub", "src_encoding": "UTF-8", "text": "import grpc\n\n\nclass InstantFuture(grpc.Future):\n def __init__(self, resp):\n self.resp = resp\n\n def cancel(self):\n return False\n\n def cancelled(self):\n return False\n\n def running(self):\n return False\n\n def done(self):\n return True\n\n def result(self, timeout=None):\n return self.resp\n\n def exception(self, timeout=None):\n return None\n\n def traceback(self, timeout=None):\n return None\n\n def add_done_callback(self, fn):\n fn(self)\n" }, { "alpha_fraction": 0.6491228342056274, "alphanum_fraction": 0.6491228342056274, "avg_line_length": 27.5, "blob_id": "cdccd1b0c9ebd120a6acba3e80c3fa1c436fe2d7", "content_id": "0ee0aa159c2bd123798863a566c796c8b47bf559", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 114, "license_type": "permissive", "max_line_length": 62, "num_lines": 4, "path": "/stubs/google/cloud/ndb/django_middleware.pyi", "repo_name": "ZachOrr/google-cloud-datastore-stub", "src_encoding": "UTF-8", "text": "from typing import Any\n\nclass NdbDjangoMiddleware:\n def __init__(self, *args: Any, **kwargs: Any) -> None: ...\n" }, { "alpha_fraction": 0.5654703974723816, "alphanum_fraction": 0.5654703974723816, "avg_line_length": 34.147727966308594, "blob_id": "54e7413c16b2d06347314b36db2def9c5efd52b6", "content_id": "a78735fba482e8e500d43b7ba11158a7012b2a85", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3093, "license_type": "permissive", "max_line_length": 70, "num_lines": 88, "path": "/stubs/google/cloud/ndb/context.pyi", "repo_name": "ZachOrr/google-cloud-datastore-stub", "src_encoding": "UTF-8", "text": "import threading\nfrom collections import namedtuple\nfrom typing import Any, Optional\n\nclass _LocalState(threading.local):\n context: Any = ...\n def __init__(self) -> None: ...\n\ndef get_context(raise_context_error: bool = ...): ...\n\n_ContextTuple = namedtuple(\n \"_ContextTuple\",\n [\n \"client\",\n \"namespace\",\n \"eventloop\",\n \"batches\",\n \"commit_batches\",\n \"transaction\",\n \"cache\",\n \"global_cache\",\n \"on_commit_callbacks\",\n \"legacy_data\",\n ],\n)\n\nclass _Context(_ContextTuple):\n def __new__(\n cls,\n client: Any,\n namespace: Any = ...,\n eventloop: Optional[Any] = ...,\n batches: Optional[Any] = ...,\n commit_batches: Optional[Any] = ...,\n transaction: Optional[Any] = ...,\n cache: Optional[Any] = ...,\n cache_policy: Optional[Any] = ...,\n global_cache: Optional[Any] = ...,\n global_cache_policy: Optional[Any] = ...,\n global_cache_timeout_policy: Optional[Any] = ...,\n 
datastore_policy: Optional[Any] = ...,\n on_commit_callbacks: Optional[Any] = ...,\n legacy_data: bool = ...,\n ): ...\n def new(self, **kwargs: Any): ...\n def use(self) -> None: ...\n\nclass Context(_Context):\n def clear_cache(self) -> None: ...\n def flush(self) -> None: ...\n def get_namespace(self): ...\n def get_cache_policy(self): ...\n def get_datastore_policy(self) -> None: ...\n def get_global_cache_policy(self): ...\n get_memcache_policy: Any = ...\n def get_global_cache_timeout_policy(self): ...\n get_memcache_timeout_policy: Any = ...\n cache_policy: Any = ...\n def set_cache_policy(self, policy: Any): ...\n datastore_policy: Any = ...\n def set_datastore_policy(self, policy: Any): ...\n global_cache_policy: Any = ...\n def set_global_cache_policy(self, policy: Any): ...\n set_memcache_policy: Any = ...\n global_cache_timeout_policy: Any = ...\n def set_global_cache_timeout_policy(self, policy: Any): ...\n set_memcache_timeout_policy: Any = ...\n def call_on_commit(self, callback: Any) -> None: ...\n def in_transaction(self): ...\n def memcache_add(self, *args: Any, **kwargs: Any) -> None: ...\n def memcache_cas(self, *args: Any, **kwargs: Any) -> None: ...\n def memcache_decr(self, *args: Any, **kwargs: Any) -> None: ...\n def memcache_delete(self, *args: Any, **kwargs: Any) -> None: ...\n def memcache_get(self, *args: Any, **kwargs: Any) -> None: ...\n def memcache_gets(self, *args: Any, **kwargs: Any) -> None: ...\n def memcache_incr(self, *args: Any, **kwargs: Any) -> None: ...\n def memcache_replace(self, *args: Any, **kwargs: Any) -> None: ...\n def memcache_set(self, *args: Any, **kwargs: Any) -> None: ...\n def urlfetch(self, *args: Any, **kwargs: Any) -> None: ...\n\nclass ContextOptions:\n def __init__(self, *args: Any, **kwargs: Any) -> None: ...\n\nclass TransactionOptions:\n def __init__(self, *args: Any, **kwargs: Any) -> None: ...\n\nclass AutoBatcher:\n def __init__(self, *args: Any, **kwargs: Any) -> None: ...\n" }, { "alpha_fraction": 0.5436308979988098, "alphanum_fraction": 0.5453025698661804, "avg_line_length": 28.038835525512695, "blob_id": "df08c9457a88bd13fe7040ac651b5e1e726a04a4", "content_id": "1a01f7a08064f8b923b45cb1d707007d1b046776", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2991, "license_type": "permissive", "max_line_length": 80, "num_lines": 103, "path": "/stubs/google/cloud/datastore/query.pyi", "repo_name": "ZachOrr/google-cloud-datastore-stub", "src_encoding": "UTF-8", "text": "from google.api_core import page_iterator as page_iterator\nfrom google.cloud._helpers import _ensure_tuple_or_list as _ensure_tuple_or_list\nfrom google.cloud.datastore import helpers as helpers\nfrom google.cloud.datastore.key import Key as Key\nfrom google.cloud.datastore_v1.proto import (\n entity_pb2 as entity_pb2,\n query_pb2 as query_pb2,\n)\nfrom typing import Any, Optional\n\n_NOT_FINISHED: Any\n_NO_MORE_RESULTS: Any\n_FINISHED: Any\n\nclass Query:\n OPERATORS: Any = ...\n _client: Any = ...\n _kind: Any = ...\n _project: Any = ...\n _namespace: Any = ...\n _ancestor: Any = ...\n _filters: Any = ...\n _projection: Any = ...\n _order: Any = ...\n _distinct_on: Any = ...\n def __init__(\n self,\n client: Any,\n kind: Optional[Any] = ...,\n project: Optional[Any] = ...,\n namespace: Optional[Any] = ...,\n ancestor: Optional[Any] = ...,\n filters: Any = ...,\n projection: Any = ...,\n order: Any = ...,\n distinct_on: Any = ...,\n ) -> None: ...\n @property\n def project(self): ...\n 
@property\n    def namespace(self): ...\n    @namespace.setter\n    def namespace(self, value: Any) -> None: ...\n    @property\n    def kind(self): ...\n    @kind.setter\n    def kind(self, value: Any) -> None: ...\n    @property\n    def ancestor(self): ...\n    @ancestor.setter\n    def ancestor(self, value: Any) -> None: ...\n    @ancestor.deleter\n    def ancestor(self) -> None: ...\n    @property\n    def filters(self): ...\n    def add_filter(self, property_name: Any, operator: Any, value: Any): ...\n    @property\n    def projection(self): ...\n    @projection.setter\n    def projection(self, projection: Any) -> None: ...\n    def keys_only(self) -> None: ...\n    def key_filter(self, key: Any, operator: str = ...) -> None: ...\n    @property\n    def order(self): ...\n    @order.setter\n    def order(self, value: Any) -> None: ...\n    @property\n    def distinct_on(self): ...\n    @distinct_on.setter\n    def distinct_on(self, value: Any) -> None: ...\n    def fetch(\n        self,\n        limit: Optional[Any] = ...,\n        offset: int = ...,\n        start_cursor: Optional[Any] = ...,\n        end_cursor: Optional[Any] = ...,\n        client: Optional[Any] = ...,\n        eventual: bool = ...,\n    ): ...\n\nclass Iterator(page_iterator.Iterator):\n    next_page_token: Any = ...\n    _query: Any = ...\n    _offset: Any = ...\n    _end_cursor: Any = ...\n    _eventual: Any = ...\n    _more_results: bool = ...\n    _skipped_results: int = ...\n    def __init__(\n        self,\n        query: Any,\n        client: Any,\n        limit: Optional[Any] = ...,\n        offset: Optional[Any] = ...,\n        start_cursor: Optional[Any] = ...,\n        end_cursor: Optional[Any] = ...,\n        eventual: bool = ...,\n    ) -> None: ...\n    def _build_protobuf(self): ...\n    def _process_query_results(self, response_pb: Any): ...\n    def _next_page(self): ...\n\ndef _pb_from_query(query: Any): ...\ndef _item_to_entity(iterator: Any, entity_pb: Any): ...\n" }, { "alpha_fraction": 0.6464088559150696, "alphanum_fraction": 0.7292817831039429, "avg_line_length": 29.16666603088379, "blob_id": "7d5309381b3d402caa9d6310fd24be5aadac8ce5", "content_id": "28ebc52faa9e80ead6a31f2a408982ab88571e67", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 181, "license_type": "permissive", "max_line_length": 66, "num_lines": 6, "path": "/.flake8", "repo_name": "ZachOrr/google-cloud-datastore-stub", "src_encoding": "UTF-8", "text": "[flake8]\n# Make flake play well with black\n# https://black.readthedocs.io/en/stable/the_black_code_style.html\nmax-line-length = 88\nselect = C,E,F,W,B,B950\nignore = E203, E501, W503\n" }, { "alpha_fraction": 0.5853474140167236, "alphanum_fraction": 0.5868580341339111, "avg_line_length": 27.17021369934082, "blob_id": "cd385b60d40fe894b6f620ec854f1cbaaa3b9c77", "content_id": "4e92c5ff80d367ac19d69d69acea822a5438ecd4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1324, "license_type": "permissive", "max_line_length": 88, "num_lines": 47, "path": "/stubs/google/cloud/datastore/_http.pyi", "repo_name": "ZachOrr/google-cloud-datastore-stub", "src_encoding": "UTF-8", "text": "from google.cloud import exceptions as exceptions\nfrom google.rpc import status_pb2 as status_pb2\nfrom typing import Any, Optional\n\nDATASTORE_API_HOST: str\nAPI_BASE_URL: Any\nAPI_VERSION: str\nAPI_URL_TEMPLATE: str\n\ndef _request(\n    http: Any, project: Any, method: Any, data: Any, base_url: Any, client_info: Any\n): ...\ndef _rpc(\n    http: Any,\n    project: Any,\n    method: Any,\n    base_url: Any,\n    client_info: Any,\n    request_pb: Any,\n    response_pb_cls: Any,\n): ...\ndef build_api_url(project: Any, method: Any, base_url: Any): 
...\n\nclass HTTPDatastoreAPI:\n client: Any = ...\n def __init__(self, client: Any) -> None: ...\n def lookup(self, project_id: Any, keys: Any, read_options: Optional[Any] = ...): ...\n def run_query(\n self,\n project_id: Any,\n partition_id: Any,\n read_options: Optional[Any] = ...,\n query: Optional[Any] = ...,\n gql_query: Optional[Any] = ...,\n ): ...\n def begin_transaction(\n self, project_id: Any, transaction_options: Optional[Any] = ...\n ): ...\n def commit(\n self,\n project_id: Any,\n mode: Any,\n mutations: Any,\n transaction: Optional[Any] = ...,\n ): ...\n def rollback(self, project_id: Any, transaction: Any): ...\n def allocate_ids(self, project_id: Any, keys: Any): ...\n" }, { "alpha_fraction": 0.5703421831130981, "alphanum_fraction": 0.5741444826126099, "avg_line_length": 25.299999237060547, "blob_id": "e1d61f02d249b561a860a16bf14611cc46afd04d", "content_id": "43ec51c89c054eaa1af9609b37fb7cda89451376", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 789, "license_type": "permissive", "max_line_length": 84, "num_lines": 30, "path": "/stubs/google/cloud/datastore_v1/gapic/transports/datastore_grpc_transport.pyi", "repo_name": "ZachOrr/google-cloud-datastore-stub", "src_encoding": "UTF-8", "text": "from google.cloud.datastore_v1.proto import datastore_pb2_grpc as datastore_pb2_grpc\nfrom typing import Any, Optional\n\nclass DatastoreGrpcTransport:\n def __init__(\n self,\n channel: Optional[Any] = ...,\n credentials: Optional[Any] = ...,\n address: str = ...,\n ) -> None: ...\n @classmethod\n def create_channel(\n cls, address: str = ..., credentials: Optional[Any] = ..., **kwargs: Any\n ): ...\n @property\n def channel(self): ...\n @property\n def lookup(self): ...\n @property\n def run_query(self): ...\n @property\n def begin_transaction(self): ...\n @property\n def commit(self): ...\n @property\n def rollback(self): ...\n @property\n def allocate_ids(self): ...\n @property\n def reserve_ids(self): ...\n" }, { "alpha_fraction": 0.6095208525657654, "alphanum_fraction": 0.6098319888114929, "avg_line_length": 32.83157730102539, "blob_id": "eadd675bdad31779e83d396e52d4b5082854a85c", "content_id": "82c176919a1a71d5469431607492d0292ede655b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3214, "license_type": "permissive", "max_line_length": 82, "num_lines": 95, "path": "/stubs/google/cloud/datastore/client.pyi", "repo_name": "ZachOrr/google-cloud-datastore-stub", "src_encoding": "UTF-8", "text": "from google.api_core.gapic_v1 import client_info as client_info\nfrom google.cloud._helpers import _LocalStack as _LocalStack\nfrom google.cloud.client import ClientWithProject as ClientWithProject\nfrom google.cloud.datastore import __version__ as __version__, helpers as helpers\nfrom google.cloud.datastore._gapic import make_datastore_api as make_datastore_api\nfrom google.cloud.datastore._http import HTTPDatastoreAPI as HTTPDatastoreAPI\nfrom google.cloud.datastore.batch import Batch as Batch\nfrom google.cloud.datastore.entity import Entity as Entity\nfrom google.cloud.datastore.key import Key as Key\nfrom google.cloud.datastore.query import Query as Query\nfrom google.cloud.datastore.transaction import Transaction as Transaction\nfrom google.cloud.environment_vars import (\n DISABLE_GRPC as DISABLE_GRPC,\n GCD_DATASET as GCD_DATASET,\n GCD_HOST as GCD_HOST,\n)\nfrom typing import Any, Optional\n\n_HAVE_GRPC: bool\n_CLIENT_INFO: Any\n_MAX_LOOPS: 
int\n_DATASTORE_BASE_URL: str\n_USE_GRPC: Any\n\ndef _get_gcd_project(): ...\ndef _determine_default_project(project: Optional[Any] = ...): ...\ndef _extended_lookup(\n datastore_api: Any,\n project: Any,\n key_pbs: Any,\n missing: Optional[Any] = ...,\n deferred: Optional[Any] = ...,\n eventual: bool = ...,\n transaction_id: Optional[Any] = ...,\n): ...\n\nclass Client(ClientWithProject):\n SCOPE: Any = ...\n namespace: Any = ...\n _client_info: Any = ...\n _client_options: Any = ...\n _batch_stack: Any = ...\n _datastore_api_internal: Any = ...\n _use_grpc: Any = ...\n _base_url: Any = ...\n def __init__(\n self,\n project: Optional[Any] = ...,\n namespace: Optional[Any] = ...,\n credentials: Optional[Any] = ...,\n client_info: Any = ...,\n client_options: Optional[Any] = ...,\n _http: Optional[Any] = ...,\n _use_grpc: Optional[Any] = ...,\n ) -> None: ...\n @staticmethod\n def _determine_default(project: Any): ...\n @property\n def base_url(self): ...\n @base_url.setter\n def base_url(self, value: Any) -> None: ...\n @property\n def _datastore_api(self): ...\n def _push_batch(self, batch: Any) -> None: ...\n def _pop_batch(self): ...\n @property\n def current_batch(self): ...\n @property\n def current_transaction(self): ...\n def get(\n self,\n key: Any,\n missing: Optional[Any] = ...,\n deferred: Optional[Any] = ...,\n transaction: Optional[Any] = ...,\n eventual: bool = ...,\n ): ...\n def get_multi(\n self,\n keys: Any,\n missing: Optional[Any] = ...,\n deferred: Optional[Any] = ...,\n transaction: Optional[Any] = ...,\n eventual: bool = ...,\n ): ...\n def put(self, entity: Any) -> None: ...\n def put_multi(self, entities: Any) -> None: ...\n def delete(self, key: Any) -> None: ...\n def delete_multi(self, keys: Any) -> None: ...\n def allocate_ids(self, incomplete_key: Any, num_ids: Any): ...\n def key(self, *path_args: Any, **kwargs: Any): ...\n def batch(self): ...\n def transaction(self, **kwargs: Any): ...\n def query(self, **kwargs: Any): ...\n def reserve_ids(self, complete_key: Any, num_ids: Any) -> None: ...\n" }, { "alpha_fraction": 0.5521582961082458, "alphanum_fraction": 0.5521582961082458, "avg_line_length": 30.77142906188965, "blob_id": "66b13e3b79284362ac8dbf5e5e8ae077016c9869", "content_id": "fff2d733c2621bbee56fe019ccfe8286e02ebc64", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1112, "license_type": "permissive", "max_line_length": 77, "num_lines": 35, "path": "/stubs/google/cloud/datastore/batch.pyi", "repo_name": "ZachOrr/google-cloud-datastore-stub", "src_encoding": "UTF-8", "text": "from google.cloud.datastore import helpers as helpers\nfrom typing import Any\n\nclass Batch:\n _id: Any = ...\n _INITIAL: int = ...\n _IN_PROGRESS: int = ...\n _ABORTED: int = ...\n _FINISHED: int = ...\n _client: Any = ...\n _mutations: Any = ...\n _partial_key_entities: Any = ...\n _status: Any = ...\n def __init__(self, client: Any) -> None: ...\n def current(self): ...\n @property\n def project(self): ...\n @property\n def namespace(self): ...\n def _add_partial_key_entity_pb(self): ...\n def _add_complete_key_entity_pb(self): ...\n def _add_delete_key_pb(self): ...\n @property\n def mutations(self): ...\n def put(self, entity: Any) -> None: ...\n def delete(self, key: Any) -> None: ...\n def begin(self) -> None: ...\n def _commit(self) -> None: ...\n def commit(self) -> None: ...\n def rollback(self) -> None: ...\n def __enter__(self): ...\n def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: 
...\n\ndef _assign_entity_to_pb(entity_pb: Any, entity: Any) -> None: ...\ndef _parse_commit_response(commit_response_pb: Any): ...\n" }, { "alpha_fraction": 0.5886031985282898, "alphanum_fraction": 0.5886031985282898, "avg_line_length": 35.89743423461914, "blob_id": "bf9ff8a65daea088de91dc51c62fbca25f6cc32b", "content_id": "994c36ecfa5ac5aaa68042bc2e008c37ec9a4178", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1439, "license_type": "permissive", "max_line_length": 85, "num_lines": 39, "path": "/stubs/google/cloud/ndb/global_cache.pyi", "repo_name": "ZachOrr/google-cloud-datastore-stub", "src_encoding": "UTF-8", "text": "import abc\nfrom collections import namedtuple\nfrom typing import Any, Optional\n\nclass GlobalCache(metaclass=abc.ABCMeta):\n __metaclass__: Any = ...\n @abc.abstractmethod\n def get(self, keys: Any) -> Any: ...\n @abc.abstractmethod\n def set(self, items: Any, expires: Optional[Any] = ...) -> Any: ...\n @abc.abstractmethod\n def delete(self, keys: Any) -> Any: ...\n @abc.abstractmethod\n def watch(self, keys: Any) -> Any: ...\n @abc.abstractmethod\n def compare_and_swap(self, items: Any, expires: Optional[Any] = ...) -> Any: ...\n\nclass _InProcessGlobalCache(GlobalCache):\n cache: Any = ...\n def __init__(self) -> None: ...\n def get(self, keys: Any): ...\n def set(self, items: Any, expires: Optional[Any] = ...) -> None: ...\n def delete(self, keys: Any) -> None: ...\n def watch(self, keys: Any) -> None: ...\n def compare_and_swap(self, items: Any, expires: Optional[Any] = ...) -> None: ...\n\n_Pipeline = namedtuple(\"_Pipeline\", [\"pipe\", \"id\"])\n\nclass RedisCache(GlobalCache):\n @classmethod\n def from_environment(cls): ...\n redis: Any = ...\n pipes: Any = ...\n def __init__(self, redis: Any) -> None: ...\n def get(self, keys: Any): ...\n def set(self, items: Any, expires: Optional[Any] = ...) 
-> None: ...\n def delete(self, keys: Any) -> None: ...\n def watch(self, keys: Any) -> None: ...\n def compare_and_swap(self, items: Any, expires: Optional[Any] = ...): ...\n" }, { "alpha_fraction": 0.7349081635475159, "alphanum_fraction": 0.7427821755409241, "avg_line_length": 22.8125, "blob_id": "6af5eca28df6621024f95ebd981f43c49d67c6f5", "content_id": "4b48fb5a10c65bf626419e701883a3020f93298d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 381, "license_type": "permissive", "max_line_length": 43, "num_lines": 16, "path": "/InMemoryCloudDatastoreStub/_transactions.py", "repo_name": "ZachOrr/google-cloud-datastore-stub", "src_encoding": "UTF-8", "text": "import enum\nfrom google.cloud.datastore_v1 import types\nfrom typing import Dict, List, NamedTuple\nfrom ._stored_object import _StoredObject\n\n\nclass _TransactionType(enum.Enum):\n READ_ONLY = 0\n READ_WRITE = 1\n\n\nclass _InFlightTransaction(NamedTuple):\n mode: _TransactionType\n initial_seqid: int\n snapshot: Dict[str, _StoredObject]\n mutations: List[types.Mutation]\n" }, { "alpha_fraction": 0.6056475043296814, "alphanum_fraction": 0.6095423698425293, "avg_line_length": 33.233333587646484, "blob_id": "0207c5364e86498c7c248c6439b0c8c126c1c169", "content_id": "3274e59d3b32fe4fbdb96ee0ec5546b24a7b7aec", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1027, "license_type": "permissive", "max_line_length": 86, "num_lines": 30, "path": "/stubs/google/cloud/ndb/client.pyi", "repo_name": "ZachOrr/google-cloud-datastore-stub", "src_encoding": "UTF-8", "text": "from google.api_core import client_info as client_info\nfrom google.cloud import client as google_client, environment_vars as environment_vars\nfrom google.cloud.datastore_v1.gapic import datastore_client as datastore_client\nfrom google.cloud.datastore_v1.proto import datastore_pb2_grpc as datastore_pb2_grpc\nfrom typing import Any, Optional\n\nDATASTORE_API_HOST: Any\n\nclass Client(google_client.ClientWithProject):\n SCOPE: Any = ...\n namespace: Any = ...\n host: Any = ...\n client_info: Any = ...\n secure: Any = ...\n stub: Any = ...\n def __init__(\n self,\n project: Optional[Any] = ...,\n namespace: Optional[Any] = ...,\n credentials: Optional[Any] = ...,\n ) -> None: ...\n def context(\n self,\n namespace: Any = ...,\n cache_policy: Optional[Any] = ...,\n global_cache: Optional[Any] = ...,\n global_cache_policy: Optional[Any] = ...,\n global_cache_timeout_policy: Optional[Any] = ...,\n legacy_data: bool = ...,\n ) -> None: ...\n" }, { "alpha_fraction": 0.5319105982780457, "alphanum_fraction": 0.5319105982780457, "avg_line_length": 32.46938705444336, "blob_id": "b566f7d9a8ba58e92d330f6f525889a8d90c2b01", "content_id": "2347a95f45e7c825d4d120eeeeb40f0b01db8f76", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4920, "license_type": "permissive", "max_line_length": 86, "num_lines": 147, "path": "/stubs/google/cloud/ndb/query.pyi", "repo_name": "ZachOrr/google-cloud-datastore-stub", "src_encoding": "UTF-8", "text": "from google.cloud.ndb import _options\nfrom typing import Any, Optional\n\nclass PropertyOrder:\n name: Any = ...\n reverse: Any = ...\n def __init__(self, name: Any, reverse: bool = ...) 
-> None: ...\n def __neg__(self): ...\n\nclass RepeatedStructuredPropertyPredicate:\n name: Any = ...\n match_keys: Any = ...\n match_values: Any = ...\n def __init__(self, name: Any, match_keys: Any, entity_pb: Any) -> None: ...\n def __call__(self, entity_pb: Any): ...\n\nclass ParameterizedThing:\n def __eq__(self, other: Any) -> Any: ...\n def __ne__(self, other: Any) -> Any: ...\n\nclass Parameter(ParameterizedThing):\n def __init__(self, key: Any) -> None: ...\n def __eq__(self, other: Any) -> Any: ...\n @property\n def key(self): ...\n def resolve(self, bindings: Any, used: Any): ...\n\nclass ParameterizedFunction(ParameterizedThing):\n func: Any = ...\n values: Any = ...\n def __init__(self, func: Any, values: Any) -> None: ...\n def __eq__(self, other: Any) -> Any: ...\n def is_parameterized(self): ...\n def resolve(self, bindings: Any, used: Any): ...\n\nclass Node:\n def __new__(cls): ...\n def __eq__(self, other: Any) -> Any: ...\n def __ne__(self, other: Any) -> Any: ...\n def __le__(self, unused_other: Any) -> Any: ...\n def __lt__(self, unused_other: Any) -> Any: ...\n def __ge__(self, unused_other: Any) -> Any: ...\n def __gt__(self, unused_other: Any) -> Any: ...\n def resolve(self, bindings: Any, used: Any): ...\n\nclass FalseNode(Node):\n def __eq__(self, other: Any) -> Any: ...\n\nclass ParameterNode(Node):\n def __new__(cls, prop: Any, op: Any, param: Any): ...\n def __getnewargs__(self): ...\n def __eq__(self, other: Any) -> Any: ...\n def resolve(self, bindings: Any, used: Any): ...\n\nclass FilterNode(Node):\n def __new__(cls, name: Any, opsymbol: Any, value: Any): ...\n def __getnewargs__(self): ...\n def __eq__(self, other: Any) -> Any: ...\n\nclass PostFilterNode(Node):\n def __new__(cls, predicate: Any): ...\n def __getnewargs__(self): ...\n def __eq__(self, other: Any) -> Any: ...\n\nclass _BooleanClauses:\n name: Any = ...\n combine_or: Any = ...\n or_parts: Any = ...\n def __init__(self, name: Any, combine_or: Any) -> None: ...\n def add_node(self, node: Any) -> None: ...\n\nclass ConjunctionNode(Node):\n def __new__(cls, *nodes: Any): ...\n def __getnewargs__(self): ...\n def __iter__(self) -> Any: ...\n def __eq__(self, other: Any) -> Any: ...\n def resolve(self, bindings: Any, used: Any): ...\n\nclass DisjunctionNode(Node):\n def __new__(cls, *nodes: Any): ...\n def __getnewargs__(self): ...\n def __iter__(self) -> Any: ...\n def __eq__(self, other: Any) -> Any: ...\n def resolve(self, bindings: Any, used: Any): ...\n\nAND = ConjunctionNode\nOR = DisjunctionNode\n\nclass QueryOptions(_options.ReadOptions):\n project: Any = ...\n namespace: Any = ...\n def __init__(\n self, config: Optional[Any] = ..., context: Optional[Any] = ..., **kwargs: Any\n ) -> None: ...\n\nclass Query:\n default_options: Any = ...\n kind: Any = ...\n ancestor: Any = ...\n filters: Any = ...\n order_by: Any = ...\n project: Any = ...\n namespace: Any = ...\n projection: Any = ...\n distinct_on: Any = ...\n def __init__(\n self,\n kind: Optional[Any] = ...,\n filters: Optional[Any] = ...,\n ancestor: Optional[Any] = ...,\n order_by: Optional[Any] = ...,\n orders: Optional[Any] = ...,\n project: Optional[Any] = ...,\n app: Optional[Any] = ...,\n namespace: Optional[Any] = ...,\n projection: Optional[Any] = ...,\n distinct_on: Optional[Any] = ...,\n group_by: Optional[Any] = ...,\n default_options: Optional[Any] = ...,\n ) -> None: ...\n @property\n def is_distinct(self): ...\n def filter(self, *filters: Any): ...\n def order(self, *props: Any): ...\n def analyze(self): ...\n def 
bind(self, *positional: Any, **keyword: Any): ...\n def fetch(self, limit: Optional[Any] = ..., **kwargs: Any): ...\n def fetch_async(self, limit: Optional[Any] = ..., **kwargs: Any): ...\n def run_to_queue(\n self,\n queue: Any,\n conn: Any,\n options: Optional[Any] = ...,\n dsquery: Optional[Any] = ...,\n ) -> None: ...\n def iter(self, **kwargs: Any): ...\n __iter__: Any = ...\n def map(self, callback: Any, **kwargs: Any): ...\n def map_async(self, callback: Any, **kwargs: Any) -> None: ...\n def get(self, **kwargs: Any): ...\n def get_async(self, **kwargs: Any) -> None: ...\n def count(self, limit: Optional[Any] = ..., **kwargs: Any): ...\n def count_async(self, limit: Optional[Any] = ..., **kwargs: Any) -> None: ...\n def fetch_page(self, page_size: Any, **kwargs: Any): ...\n def fetch_page_async(self, page_size: Any, **kwargs: Any) -> None: ...\n\ndef gql(query_string: Any, *args: Any, **kwds: Any): ...\n" } ]
57
argtoms/discord_simple_afk_bot
https://github.com/argtoms/discord_simple_afk_bot
3d92ca258bb9446606f9e86b62464ac9ff2c709e
985dbc3230b8d18e972f2251d3b21848f46e2aaf
118b6c7c807f156c93e9e50ffc0947fe08bdc2b7
refs/heads/master
2023-03-07T23:32:00.050755
2021-02-28T21:31:44
2021-02-28T21:31:44
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7564102411270142, "alphanum_fraction": 0.7564102411270142, "avg_line_length": 38, "blob_id": "0512bc1e1b60aadfa86715b8b39b4455d21a9d5c", "content_id": "da633aa11c29978548576c9910397d7dc64fee5a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 234, "license_type": "no_license", "max_line_length": 79, "num_lines": 6, "path": "/README.md", "repo_name": "argtoms/discord_simple_afk_bot", "src_encoding": "UTF-8", "text": "# A discord simple AFK bot\nA really simple discord bot that toggles you AFK.\n\nType .afk and it will prefix your nickname with [AFK] - thats all it does.\n\nYou need the discord.py module installed > https://pypi.org/project/discord.py/\n" }, { "alpha_fraction": 0.6403846144676208, "alphanum_fraction": 0.6442307829856873, "avg_line_length": 21.60869598388672, "blob_id": "34bcc40c9a6ae0687ed7ce7b315875a17b9547e5", "content_id": "77a957c9073194c1626beb1e4597f919469bb684", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 520, "license_type": "no_license", "max_line_length": 58, "num_lines": 23, "path": "/bot.py", "repo_name": "argtoms/discord_simple_afk_bot", "src_encoding": "UTF-8", "text": "import discord\nfrom discord.ext import commands\n\nclient = commands.Bot(command_prefix = '.')\n\n@client.event\nasync def on_ready():\n print('Bot is ready.')\n\n@client.command(pass_context=True)\nasync def afk(ctx):\n x = \"[AFK] \"\n current_nick = ctx.author.nick\n if x in current_nick:\n old_nick = current_nick[6:]\n await ctx.author.edit(nick=old_nick)\n else:\n await ctx.author.edit(nick=\"[AFK] \"+current_nick)\n await ctx.channel.purge(limit=1)\n \n \n\nclient.run('enter_your_token')\n" } ]
2
lijingchn/mysite
https://github.com/lijingchn/mysite
1aaa0d9e69ec7060a442ca5f298ec26ba991d9b5
730f1bebfafc55c665208ae088f23853024c48fc
d0d8d2e6d57b628019b32685f6b7e726952f3f47
refs/heads/master
2016-04-26T00:13:57.537415
2016-03-14T13:57:33
2016-03-14T13:57:33
53,635,729
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6170212626457214, "alphanum_fraction": 0.618794322013855, "avg_line_length": 36.599998474121094, "blob_id": "d6a89e996c42cf9ce6393c23a8826748ff1df04d", "content_id": "e1612c0b42ca3b7e4443e5b0420c33db0f11e332", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 564, "license_type": "no_license", "max_line_length": 57, "num_lines": 15, "path": "/users/urls.py", "repo_name": "lijingchn/mysite", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# encoding: utf-8\n\nfrom django.conf.urls import patterns, include, url\n\nurlpatterns = patterns('',\n url(r'^$', 'users.views.first_page'),\n url(r'^user_login/', 'users.views.user_login'),\n url(r'^user_logout/', 'users.views.user_logout'),\n url(r'^diff_response/', 'users.views.diff_response'),\n url(r'^user_only/', 'users.views.user_only'),\n url(r'^specific_user/', 'users.views.specific_user'),\n url(r'^form_process/', 'users.views.form_process'),\n url(r'^register/', 'users.views.register'),\n )\n" }, { "alpha_fraction": 0.6323529481887817, "alphanum_fraction": 0.6352941393852234, "avg_line_length": 27.33333396911621, "blob_id": "4954bad99b2488b9f65054b6fe0d8fdf9dae36ce", "content_id": "b8b9962cb0ac40dc8723ab6cb61588152e247e3d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 340, "license_type": "no_license", "max_line_length": 52, "num_lines": 12, "path": "/west/urls.py", "repo_name": "lijingchn/mysite", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# encoding: utf-8\n\nfrom django.conf.urls import patterns,include,url\n\nurlpatterns = patterns('',\n url(r'^$', 'west.views.first_page'),\n url(r'^staff/', 'west.views.staff'),\n url(r'^templay/', 'west.views.templay'),\n url(r'^form/', 'west.views.form'),\n url(r'^investigate/', 'west.views.investigate'),\n)\n" }, { "alpha_fraction": 0.514159619808197, "alphanum_fraction": 0.5152629613876343, "avg_line_length": 30.069766998291016, "blob_id": "ea9eb484be94d7632c0c31f3e148dc242c4c93f4", "content_id": "2d0cf20e02b8108fec1c9d13aacd273a52f8dd88", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2801, "license_type": "no_license", "max_line_length": 96, "num_lines": 86, "path": "/users/views.py", "repo_name": "lijingchn/mysite", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# encoding: utf-8\n\n\nfrom django.http import HttpResponse\nfrom django.shortcuts import render, redirect\nfrom django.core.context_processors import csrf\nfrom django.contrib.auth import *\nfrom django.contrib.auth.decorators import login_required, user_passes_test\nfrom django.contrib.auth.forms import UserCreationForm\n\n\n# Create your views here.\n\ndef first_page(request):\n return HttpResponse(\"<p>用户要登录啦~~~~~~~~~~~~~~</p>\")\n\n\n# ----------------------------------------------------------------------------------------------\n# login\ndef user_login(request):\n if request.POST:\n username = password = ''\n username = request.POST.get('username')\n password = request.POST.get('password')\n user = authenticate(username=username, password=password)\n print '========================================'\n print user\n if user is not None and user.is_active:\n login(request,user)\n return redirect('/users')\n else:\n return HttpResponse(\"<p>用户名或密码不能为空!</p>\")\n\n ctx = {}\n ctx.update(csrf(request))\n print '.....................................'\n return render(request,'login.html',ctx)\n# return 
render(request,'user_login.html',ctx)\n\n# logout\ndef user_logout(request):\n logout(request)\n return redirect('/west')\n\n# ----------------------------------------------------------------------------------------------\n# 不同的用户显示不同的内容\ndef diff_response(request):\n if request.user.is_authenticated():\n print request.user.is_authenticated()\n content = \"<p> my dear user ~ </p>\"\n else:\n content = \"<p> hi, stranger. </p>\"\n return HttpResponse(content)\n\n@login_required\ndef user_only(request):\n return HttpResponse(\"<p> This message is for logged in user only. </p>\")\n\ndef name_check(user):\n return user.get_username() == 'lijing'\n\n@user_passes_test(name_check)\ndef specific_user(request):\n return HttpResponse(\"<p> for lijing only </p>\")\n\n\n# ----------------------------------------------------------------------------------------------\ndef form_process(request):\n return render(request, 'form01.html')\n\n\n# ----------------------------------------------------------------------------------------------\n# 用户注册\ndef register(request):\n if request.method == 'POST':\n form = UserCreationForm(request.POST)\n if form.is_valid():\n form.save() # 注册信息存入数据库\n# new_user = form.save()\n return redirect(\"/\")\n else:\n form = UserCreationForm()\n ctx = {'form':form}\n ctx.update(csrf(request))\n return render(request, \"register.html\", ctx)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.583830714225769, "alphanum_fraction": 0.5843732953071594, "avg_line_length": 27.09375, "blob_id": "587ac29d8bf6e700672a1a1446111204dbece130", "content_id": "b95ae1b8aadc0ac6e41f4dc216e20c5a6a0fda6e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1875, "license_type": "no_license", "max_line_length": 76, "num_lines": 64, "path": "/users/views.py.bak", "repo_name": "lijingchn/mysite", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# encoding: utf-8\n\n\nfrom django.http import HttpResponse\nfrom django.shortcuts import render, redirect\nfrom django.core.context_processors import csrf\nfrom django.contrib.auth import *\nfrom django.contrib.auth.decorators import login_required, user_passes_test\n\n\n# Create your views here.\n\ndef first_page(request):\n return HttpResponse(\"<p>用户要登录啦~~~~~~~~~~~~~~</p>\")\n\n\n\n# login\ndef user_login(request):\n if request.POST:\n username = password = ''\n username = request.POST.get('username')\n password = request.POST.get('password')\n user = authenticate(username=username, password=password)\n print '========================================'\n print user\n print 'user user user'\n if user is not None:\n# if user is not None and user.is_active:\n print '======================================='\n login(request,user)\n return redirect('/users')\n# else:\n# return HttpResponse(\"<p>用户名或密码不能为空!</p>\")\n\n ctx = {}\n ctx.update(csrf(request))\n print '.....................................'\n return render(request,'login.html',ctx)\n# return render(request,'user_login.html',ctx)\n\n# logout\ndef user_logout(request):\n logout(request)\n return redirect('/west')\n\ndef diff_response(request):\n if request.user.is_authenticated():\n content = \"<p> my dear user ~ </p>\"\n else:\n content = \"<p> hi, stranger. </p>\"\n return HttpResponse(content)\n\n@login_required\ndef user_only(request):\n return HttpResponse(\"<p> This message is for logged in user only. 
</p>\")\n\ndef name_check(user):\n return user.get_username() == 'lijing'\n\n@user_passes_test(name_check)\ndef specific_user(request):\n return HttpResponse(\"<p> for lijing only </p>\")\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.7272727489471436, "alphanum_fraction": 0.7272727489471436, "avg_line_length": 15.5, "blob_id": "1d23e7b9ddc6412239ee05e752e07946a1924dc3", "content_id": "db333f3371aa651cddf046879caac829e7ba4274", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 33, "license_type": "no_license", "max_line_length": 23, "num_lines": 2, "path": "/README.md", "repo_name": "lijingchn/mysite", "src_encoding": "UTF-8", "text": "# mysite\nmy practice of Django ~\n" } ]
5
AstroMaxx/Interpreter
https://github.com/AstroMaxx/Interpreter
e2882c6522766bee26da1f4c0bcd4d1c536844fc
08a9d4430de3143f3199925520b633fcc7ffbcce
6fe56106c1809607852dab9265bf4b9599d1a859
refs/heads/master
2022-12-01T14:02:01.979991
2020-08-04T12:02:37
2020-08-04T12:02:37
284,965,809
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.3218609094619751, "alphanum_fraction": 0.34262385964393616, "avg_line_length": 41.852882385253906, "blob_id": "b8abff860f05cdecf82d827a8a0731147aab3509", "content_id": "a93185bd82108f211d9fb8aed9b9e07b4e49c33b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 85248, "license_type": "no_license", "max_line_length": 174, "num_lines": 1944, "path": "/interp2.py", "repo_name": "AstroMaxx/Interpreter", "src_encoding": "UTF-8", "text": "import random\r\n\r\nclass Interp:\r\n\r\n def __init__(self, prog):\r\n self.prog = prog\r\n self.pc = 0\r\n\r\n def eval(self, expr):\r\n etype = expr[0]\r\n if etype == 'INTEG':\r\n return expr[1]\r\n elif etype == 'BOOL':\r\n if expr[1] == 'T':\r\n return True\r\n else:\r\n return False\r\n elif etype == 'INT':\r\n if expr[1] in self.vars:\r\n return self.vars[expr[1]][0][0]\r\n else:\r\n print('Uninitialized variable ', expr[1])\r\n raise RuntimeError\r\n elif etype == 'BOOLE':\r\n if expr[1] in self.vars:\r\n return self.vars[expr[1]][0][0]\r\n else:\r\n print('Uninitialized variable ', expr[1])\r\n raise RuntimeError\r\n elif etype == 'PROC':\r\n if expr[1] in self.vars:\r\n return self.vars[expr[1]][0][0]\r\n else:\r\n raise RuntimeError\r\n elif etype == 'MASPROC':\r\n if len(expr) == 3:\r\n if expr[1] in self.vars and 'PROC' in self.vars[expr[1]][1]:\r\n return self.vars[expr[1]][0][self.eval(expr[2])]\r\n else:\r\n raise RuntimeError\r\n else:\r\n if expr[1] in self.vars and 'PROC' in self.vars[expr[1]][1]:\r\n dims = self.list_dim(expr[3])\r\n if isinstance(dims, int):\r\n dims = [dims]\r\n dims.insert(0, self.eval(expr[2]))\r\n return self.elem(self.vars[expr[1]][0], dims)\r\n else:\r\n raise RuntimeError\r\n\r\n elif etype == 'MASINT':\r\n if len(expr) == 3:\r\n if expr[1] in self.vars:\r\n if isinstance(self.vars[expr[1]][0][self.eval(expr[2])], int):\r\n return self.vars[expr[1]][0][self.eval(expr[2])]\r\n else:\r\n print('This variable is not int')\r\n raise RuntimeError\r\n else:\r\n raise RuntimeError\r\n else:\r\n if expr[1] in self.vars:\r\n dims = self.list_dim(expr[3])\r\n if isinstance(dims, int):\r\n dims = [dims]\r\n dims.insert(0, self.eval(expr[2]))\r\n return self.elem(self.vars[expr[1]][0], dims)\r\n else:\r\n raise RuntimeError\r\n elif etype == 'MASBOOL':\r\n if len(expr) == 3:\r\n if expr[1] in self.vars:\r\n return self.vars[expr[1]][0][self.eval(expr[2])]\r\n else:\r\n raise RuntimeError\r\n else:\r\n if expr[1] in self.vars:\r\n dims = self.list_dim(expr[3])\r\n if isinstance(dims, int):\r\n dims = [dims]\r\n dims.insert(0, self.eval(expr[2]))\r\n return self.elem(self.vars[expr[1]][0], dims)\r\n else:\r\n raise RuntimeError\r\n elif etype == 'BINOP':\r\n if expr[1] == ',#':\r\n self.plus(expr[2])\r\n else:\r\n self.minus(expr[2])\r\n elif etype == 'RELOP':\r\n self.proced(expr[1])\r\n self.proced(expr[3])\r\n if expr[2] == 'EQ':\r\n if expr[3] != 'NP':\r\n if isinstance(self.eval(expr[1]), int) and isinstance(self.eval(expr[3]), int):\r\n if self.eval(expr[1]) == self.eval(expr[3]):\r\n return True\r\n else:\r\n return False\r\n elif expr[1][0] == 'RELOP' and isinstance(self.eval(expr[3]), bool):\r\n if self.relop(expr[1]) == self.eval(expr[3]):\r\n return True\r\n else:\r\n return False\r\n else:\r\n if not (expr[1][1] in self.vars) or ('PROC' in self.vars[expr[1][1]][1] and not ('STATGROUP' in self.vars[expr[1][1]][0])):\r\n return True\r\n else:\r\n return False\r\n elif expr[2] == 'MO':\r\n if isinstance(self.eval(expr[1]), int) and 
isinstance(self.eval(expr[3]), int):\r\n if self.eval(expr[1]) > self.eval(expr[3]):\r\n return True\r\n else:\r\n return False\r\n elif etype == 'LOGIC':\r\n self.proced(expr[1])\r\n self.proced(expr[2])\r\n first = self.eval(expr[1])\r\n second = self.eval(expr[2])\r\n if first == True or second == True:\r\n return True\r\n else:\r\n return False\r\n elif etype == 'PIERCE':\r\n if self.eval(expr[1]) == True:\r\n return False\r\n else:\r\n return True\r\n elif etype == 'PIERCES':\r\n if self.eval(expr[1]) == True:\r\n return False\r\n else:\r\n return True\r\n\r\n elif etype == 'IDENT':\r\n self.proced(expr[1])\r\n self.proced(expr[3])\r\n if expr[2] == '@':\r\n return self.bind(expr[1], expr[3])\r\n else:\r\n return self.unbind(expr[1], expr[3])\r\n\r\n elif etype == 'MOVE':\r\n self.maze['maze'][self.maze['coord'][0]][self.maze['coord'][1]] = 2\r\n if expr[1] == 'MF':\r\n if self.maze['coord'][2] == 0:\r\n if (self.maze['coord'][0] - 1) >= 0 and self.maze['maze'][self.maze['coord'][0] - 1][self.maze['coord'][1]] != 1:\r\n self.maze['coord'][0] -= 1\r\n if self.maze['maze'][self.maze['coord'][0]][self.maze['coord'][1]] == 5:\r\n self.maze['success'] = 1\r\n print('SUCCESSED!!!')\r\n else:\r\n self.maze['maze'][self.maze['coord'][0]][self.maze['coord'][1]] = 2\r\n self.print_maze()\r\n return True\r\n else:\r\n return False\r\n elif self.maze['coord'][2] == 1:\r\n if (self.maze['coord'][1] - 1) >= 0 and self.maze['maze'][self.maze['coord'][0]][self.maze['coord'][1] - 1] != 1:\r\n self.maze['coord'][1] -= 1\r\n if self.maze['maze'][self.maze['coord'][0]][self.maze['coord'][1]] == 5:\r\n self.maze['success'] = 1\r\n print('SUCCESSED!!!')\r\n else:\r\n self.maze['maze'][self.maze['coord'][0]][self.maze['coord'][1]] = 2\r\n self.print_maze()\r\n return True\r\n else:\r\n return False\r\n elif self.maze['coord'][2] == 2:\r\n if len(self.maze['maze']) > self.maze['coord'][0] + 1 and self.maze['maze'][self.maze['coord'][0] + 1][self.maze['coord'][1]] != 1:\r\n self.maze['coord'][0] += 1\r\n if self.maze['maze'][self.maze['coord'][0]][self.maze['coord'][1]] == 5:\r\n self.maze['success'] = 1\r\n print('SUCCESSED!!!')\r\n else:\r\n self.maze['maze'][self.maze['coord'][0]][self.maze['coord'][1]] = 2\r\n self.print_maze()\r\n return True\r\n else:\r\n return False\r\n elif self.maze['coord'][2] == 3:\r\n if len(self.maze['maze'][self.maze['coord'][0]]) > self.maze['coord'][1] + 1 and self.maze['maze'][self.maze['coord'][0]][self.maze['coord'][1] + 1] != 1:\r\n self.maze['coord'][1] += 1\r\n if self.maze['maze'][self.maze['coord'][0]][self.maze['coord'][1]] == 5:\r\n self.maze['success'] = 1\r\n print('SUCCESSED!!!')\r\n else:\r\n self.maze['maze'][self.maze['coord'][0]][self.maze['coord'][1]] = 2\r\n self.print_maze()\r\n return True\r\n else:\r\n return False\r\n elif expr[1] == 'ML':\r\n if self.maze['coord'][2] == 0:\r\n if (self.maze['coord'][1] - 1) >= 0 and self.maze['maze'][self.maze['coord'][0]][self.maze['coord'][1] - 1] != 1:\r\n self.maze['coord'][1] -= 1\r\n if self.maze['maze'][self.maze['coord'][0]][self.maze['coord'][1]] == 5:\r\n self.maze['success'] = 1\r\n print('SUCCESSED!!!')\r\n else:\r\n self.maze['maze'][self.maze['coord'][0]][self.maze['coord'][1]] = 2\r\n self.maze['coord'][2] = 1\r\n self.print_maze()\r\n return True\r\n else:\r\n return False\r\n elif self.maze['coord'][2] == 1:\r\n if len(self.maze['maze']) > self.maze['coord'][0] + 1 and self.maze['maze'][self.maze['coord'][0] + 1][self.maze['coord'][1]] != 1:\r\n self.maze['coord'][0] += 1\r\n if 
self.maze['maze'][self.maze['coord'][0]][self.maze['coord'][1]] == 5:\r\n self.maze['success'] = 1\r\n print('SUCCESSED!!!')\r\n else:\r\n self.maze['maze'][self.maze['coord'][0]][self.maze['coord'][1]] = 2\r\n self.maze['coord'][2] = 2\r\n self.print_maze()\r\n return True\r\n else:\r\n return False\r\n elif self.maze['coord'][2] == 2:\r\n if len(self.maze['maze'][self.maze['coord'][0]]) > self.maze['coord'][1] + 1 and self.maze['maze'][self.maze['coord'][0]][self.maze['coord'][1] + 1] != 1:\r\n self.maze['coord'][1] += 1\r\n if self.maze['maze'][self.maze['coord'][0]][self.maze['coord'][1]] == 5:\r\n self.maze['success'] = 1\r\n print('SUCCESSED!!!')\r\n else:\r\n self.maze['maze'][self.maze['coord'][0]][self.maze['coord'][1]] = 2\r\n self.maze['coord'][2] = 3\r\n self.print_maze()\r\n return True\r\n else:\r\n return False\r\n elif self.maze['coord'][2] == 3:\r\n if (self.maze['coord'][0] - 1) >= 0 and self.maze['maze'][self.maze['coord'][0] - 1][self.maze['coord'][1]] != 1:\r\n self.maze['coord'][0] -= 1\r\n if self.maze['maze'][self.maze['coord'][0]][self.maze['coord'][1]] == 5:\r\n self.maze['success'] = 1\r\n print('SUCCESSED!!!')\r\n else:\r\n self.maze['maze'][self.maze['coord'][0]][self.maze['coord'][1]] = 2\r\n self.maze['coord'][2] = 0\r\n self.print_maze()\r\n return True\r\n else:\r\n return False\r\n elif expr[1] == 'MB':\r\n if self.maze['coord'][2] == 0:\r\n if len(self.maze['maze']) > self.maze['coord'][0] + 1 and self.maze['maze'][self.maze['coord'][0] + 1][self.maze['coord'][1]] != 1:\r\n self.maze['coord'][0] += 1\r\n if self.maze['maze'][self.maze['coord'][0]][self.maze['coord'][1]] == 5:\r\n self.maze['success'] = 1\r\n print('SUCCESSED!!!')\r\n else:\r\n self.maze['maze'][self.maze['coord'][0]][self.maze['coord'][1]] = 2\r\n self.maze['coord'][2] = 2\r\n self.print_maze()\r\n return True\r\n else:\r\n return False\r\n elif self.maze['coord'][2] == 1:\r\n if len(self.maze['maze'][self.maze['coord'][0]]) > self.maze['coord'][1] + 1 and self.maze['maze'][self.maze['coord'][0]][self.maze['coord'][1] + 1] != 1:\r\n self.maze['coord'][1] += 1\r\n if self.maze['maze'][self.maze['coord'][0]][self.maze['coord'][1]] == 5:\r\n self.maze['success'] = 1\r\n print('SUCCESSED!!!')\r\n else:\r\n self.maze['maze'][self.maze['coord'][0]][self.maze['coord'][1]] = 2\r\n self.maze['coord'][2] = 3\r\n self.print_maze()\r\n return True\r\n else:\r\n return False\r\n elif self.maze['coord'][2] == 2:\r\n if (self.maze['coord'][0] - 1) >= 0 and self.maze['maze'][self.maze['coord'][0] - 1][self.maze['coord'][1]] != 1:\r\n self.maze['coord'][0] -= 1\r\n if self.maze['maze'][self.maze['coord'][0]][self.maze['coord'][1]] == 5:\r\n self.maze['success'] = 1\r\n print('SUCCESSED!!!')\r\n else:\r\n self.maze['maze'][self.maze['coord'][0]][self.maze['coord'][1]] = 2\r\n self.maze['coord'][2] = 0\r\n self.print_maze()\r\n return True\r\n else:\r\n return False\r\n elif self.maze['coord'][2] == 3:\r\n if (self.maze['coord'][1] - 1) >= 0 and self.maze['maze'][self.maze['coord'][0]][self.maze['coord'][1] - 1] != 1:\r\n self.maze['coord'][0] -= 1\r\n if self.maze['maze'][self.maze['coord'][0]][self.maze['coord'][1]] == 5:\r\n self.maze['success'] = 1\r\n print('SUCCESSED!!!')\r\n else:\r\n self.maze['maze'][self.maze['coord'][0]][self.maze['coord'][1]] = 2\r\n self.maze['coord'][2] = 1\r\n self.print_maze()\r\n return True\r\n else:\r\n return False\r\n elif expr[1] == 'MR':\r\n if self.maze['coord'][2] == 0:\r\n if len(self.maze['maze'][self.maze['coord'][0]]) > self.maze['coord'][1] + 1 and 
self.maze['maze'][self.maze['coord'][0]][self.maze['coord'][1] + 1] != 1:\r\n self.maze['coord'][1] += 1\r\n if self.maze['maze'][self.maze['coord'][0]][self.maze['coord'][1]] == 5:\r\n self.maze['success'] = 1\r\n print('SUCCESSED!!!')\r\n else:\r\n self.maze['maze'][self.maze['coord'][0]][self.maze['coord'][1]] = 2\r\n self.maze['coord'][2] = 3\r\n self.print_maze()\r\n return True\r\n else:\r\n return False\r\n elif self.maze['coord'][2] == 1:\r\n if (self.maze['coord'][0] - 1) >= 0 and self.maze['maze'][self.maze['coord'][0] - 1][self.maze['coord'][1]] != 1:\r\n self.maze['coord'][0] -= 1\r\n if self.maze['maze'][self.maze['coord'][0]][self.maze['coord'][1]] == 5:\r\n self.maze['success'] = 1\r\n print('SUCCESSED!!!')\r\n else:\r\n self.maze['maze'][self.maze['coord'][0]][self.maze['coord'][1]] = 2\r\n self.maze['coord'][2] = 0\r\n self.print_maze()\r\n return True\r\n else:\r\n return False\r\n elif self.maze['coord'][2] == 2:\r\n if (self.maze['coord'][1] - 1) >= 0 and self.maze['maze'][self.maze['coord'][0]][self.maze['coord'][1] - 1] != 1:\r\n self.maze['coord'][1] -= 1\r\n if self.maze['maze'][self.maze['coord'][0]][self.maze['coord'][1]] == 5:\r\n self.maze['success'] = 1\r\n print('SUCCESSED!!!')\r\n else:\r\n self.maze['maze'][self.maze['coord'][0]][self.maze['coord'][1]] = 2\r\n self.maze['coord'][2] = 1\r\n self.print_maze()\r\n return True\r\n else:\r\n return False\r\n elif self.maze['coord'][2] == 3:\r\n if len(self.maze['maze']) > self.maze['coord'][0] + 1 and self.maze['maze'][self.maze['coord'][0] + 1][self.maze['coord'][1]] != 1:\r\n self.maze['coord'][0] += 1\r\n if self.maze['maze'][self.maze['coord'][0]][self.maze['coord'][1]] == 5:\r\n self.maze['success'] = 1\r\n print('SUCCESSED!!!')\r\n else:\r\n self.maze['maze'][self.maze['coord'][0]][self.maze['coord'][1]] = 2\r\n self.maze['coord'][2] = 2\r\n self.print_maze()\r\n return True\r\n else:\r\n return False\r\n elif expr[1] == 'TP':\r\n if self.maze['tp'] > 0:\r\n x = random.randint(0, len(self.maze['maze']) - 1)\r\n y = random.randint(0, len(self.maze['maze'][self.maze['coord'][0]]) - 1)\r\n i = 1\r\n while self.maze['maze'][x][y] == 1 or self.maze['maze'][x][y] == 2:\r\n if(i > 50):\r\n print('FAILED!!!')\r\n return False\r\n x = random.randint(0, len(self.maze['maze']) - 1)\r\n y = random.randint(0, len(self.maze['maze'][self.maze['coord'][0]]) - 1)\r\n i += 1\r\n self.maze['coord'][0] = x\r\n self.maze['coord'][1] = y\r\n if self.maze['maze'][self.maze['coord'][0]][self.maze['coord'][1]] == 5:\r\n self.maze['success'] = 1\r\n print('SUCCESSED!!!')\r\n else:\r\n self.maze['maze'][self.maze['coord'][0]][self.maze['coord'][1]] = 2\r\n self.maze['tp'] -= 1\r\n self.print_maze()\r\n print('TELEPORT!!!')\r\n return True\r\n else:\r\n self.maze['success'] = -1\r\n print('FAILED!!!')\r\n return False\r\n\r\n\r\n def print_maze(self):\r\n for i in range(len(self.maze['maze'])):\r\n print()\r\n for j in range(len(self.maze['maze'][i])):\r\n if i == self.maze['coord'][0] and j == self.maze['coord'][1]:\r\n print(7, end = '')\r\n else:\r\n print(self.maze['maze'][i][j], end = '')\r\n print()\r\n print()\r\n\r\n def bind(self, id, expr):\r\n if id == expr:\r\n print('Recursion')\r\n return False\r\n elif not (id[1] in self.vars):\r\n print('Uninitialized variable', id[1])\r\n return False\r\n else:\r\n if expr[0] == 'PROC' or expr[0] == 'MASPROC':\r\n if id[0] == 'INT':\r\n if not ('PROC' in self.ident[id[1]][1] and 'BOOL' in self.ident[id[1]][1]):\r\n if len(self.vars[id[1]][0]) == 1:\r\n 
self.ident[id[1]][0][0].append(expr)\r\n return True\r\n else:\r\n print('Its massiv')\r\n return False\r\n else:\r\n print('Error variable')\r\n return False\r\n elif id[0] == 'BOOL':\r\n if not ('PROC' in self.ident[id[1]][1] and 'INT' in self.ident[id[1]][1]):\r\n if len(self.vars[id[1]][0]) == 1:\r\n self.ident[id[1]][0][0].append(expr)\r\n return True\r\n else:\r\n print('Its massiv')\r\n return False\r\n else:\r\n print('Error variable')\r\n return False\r\n elif id[0] == 'PROC':\r\n if not ('BOOL' in self.ident[id[1]][1] and 'INT' in self.ident[id[1]][1]):\r\n if len(self.vars[id[1]][0]) == 1:\r\n self.ident[id[1]][0][0].append(expr)\r\n return True\r\n else:\r\n print('Its massiv')\r\n return False\r\n else:\r\n print('Error variable')\r\n return False\r\n elif id[0] == 'MASINT':\r\n if not ('PROC' in self.ident[id[1]][1] and 'BOOL' in self.ident[id[1]][1]):\r\n if len(id) == 3:\r\n if len(self.ident[id[1]][0]) > self.eval(id[2]):\r\n self.ident[id[1]][0][self.eval(id[2])].append(expr)\r\n return True\r\n else:\r\n print('Going out of the array')\r\n return False\r\n else:\r\n pres = self.vars[id[1]][0]\r\n dims = self.list_dim(id[3])\r\n if isinstance(dims, int):\r\n dims = [dims]\r\n dims.insert(0, self.eval(id[2]))\r\n pr = self.list_mas(pres)\r\n if len(pr) == len(dims):\r\n for i in range(len(dims)):\r\n if pr[i] < dims[i]:\r\n print('Going out of the array')\r\n return False\r\n self.eq_mas2(dims, self.ident[id[1]][0], expr)\r\n return True\r\n else:\r\n print('Going out of the array')\r\n return False\r\n else:\r\n print('Error variable')\r\n return False\r\n elif id[0] == 'MASBOOL':\r\n if not ('PROC' in self.ident[id[1]][1] and 'INT' in self.ident[id[1]][1]):\r\n if len(id) == 3:\r\n if len(self.ident[id[1]][0]) > self.eval(id[2]):\r\n self.ident[id[1]][0][self.eval(id[2])].append(expr)\r\n return True\r\n else:\r\n print('Going out of the array')\r\n return False\r\n else:\r\n pres = self.vars[id[1]][0]\r\n dims = self.list_dim(id[3])\r\n if isinstance(dims, int):\r\n dims = [dims]\r\n dims.insert(0, self.eval(id[2]))\r\n pr = self.list_mas(pres)\r\n if len(pr) == len(dims):\r\n for i in range(len(dims)):\r\n if pr[i] < dims[i]:\r\n print('Going out of the array')\r\n return False\r\n self.eq_mas2(dims, self.ident[id[1]][0], expr)\r\n return True\r\n else:\r\n print('Going out of the array')\r\n return False\r\n else:\r\n print('Error variable')\r\n return False\r\n elif id[0] == 'MASPROC':\r\n if not ('INT' in self.ident[id[1]][1] and 'BOOL' in self.ident[id[1]][1]):\r\n if len(id) == 3:\r\n if len(self.ident[id[1]][0]) > self.eval(id[2]):\r\n self.ident[id[1]][0][self.eval(id[2])].append(expr)\r\n return True\r\n else:\r\n print('Going out of the array')\r\n return False\r\n else:\r\n pres = self.vars[id[1]][0]\r\n dims = self.list_dim(id[3])\r\n if isinstance(dims, int):\r\n dims = [dims]\r\n dims.insert(0, self.eval(id[2]))\r\n pr = self.list_mas(pres)\r\n if len(pr) == len(dims):\r\n for i in range(len(dims)):\r\n if pr[i] < dims[i]:\r\n print('Going out of the array')\r\n return False\r\n self.eq_mas2(dims, self.ident[id[1]][0], expr)\r\n return True\r\n else:\r\n print('Going out of the array')\r\n return False\r\n else:\r\n print('Error variable')\r\n return False\r\n\r\n def proced(self, expr):\r\n etype = expr[0]\r\n if etype == 'INT':\r\n if expr[1] in self.vars:\r\n if len(self.ident[expr[1]][0][0]) != 0:\r\n for i in range(len(self.ident[expr[1]][0][0])):\r\n self.ex(self.ident[expr[1]][0][0][i])\r\n else:\r\n print('Uninitialized variable' , expr[1])\r\n 
raise RuntimeError\r\n elif etype == 'BOOLE':\r\n if expr[1] in self.vars:\r\n if len(self.ident[expr[1]][0][0]) != 0:\r\n for i in range(len(self.ident[expr[1]][0][0])):\r\n self.ex(self.ident[expr[1]][0][0][i])\r\n else:\r\n print('Uninitialized variable' , expr[1])\r\n raise RuntimeError\r\n elif etype == 'PROC':\r\n if expr[1] in self.vars:\r\n if len(self.ident[expr[1]][0][0]) != 0:\r\n for i in range(len(self.ident[expr[1]][0][0])):\r\n self.ex(self.ident[expr[1]][0][0][i])\r\n else:\r\n print('Uninitialized variable' , expr[1])\r\n raise RuntimeError\r\n elif etype == 'MASPROC':\r\n if len(expr) == 3:\r\n if expr[1] in self.vars and 'PROC' in self.vars[expr[1]][1]:\r\n for i in range(len(self.ident[expr[1]][0][self.eval(expr[2])])):\r\n self.ex(self.ident[expr[1]][0][0][i])\r\n else:\r\n if expr[1] in self.vars and 'PROC' in self.vars[expr[1]][1]:\r\n self.proced(expr[2])\r\n self.proced(expr[3])\r\n elif etype == 'MASINT':\r\n if len(expr) == 3:\r\n if expr[1] in self.vars:\r\n if isinstance(self.vars[expr[1]][0][self.eval(expr[2])], int):\r\n for i in range(len(self.ident[expr[1]][0][self.eval(expr[2])])):\r\n self.ex(self.ident[expr[1]][0][0][i])\r\n else:\r\n if expr[1] in self.vars:\r\n self.proced(expr[2])\r\n self.proced(expr[3])\r\n elif etype == 'MASBOOL':\r\n if len(expr) == 3:\r\n if expr[1] in self.vars:\r\n for i in range(len(self.ident[expr[1]][0][self.eval(expr[2])])):\r\n self.ex(self.ident[expr[1]][0][0][i])\r\n return self.vars[expr[1]][0][self.eval(expr[2])]\r\n else:\r\n if expr[1] in self.vars:\r\n self.proced(expr[2])\r\n self.proced(expr[3])\r\n elif etype == 'DIMS':\r\n self.proced(expr[1])\r\n self.proced(expr[2])\r\n\r\n def unbind(self, id, expr):\r\n if id == expr:\r\n print('Recursion')\r\n return False\r\n elif not (id[1] in self.vars):\r\n print('Uninitialized variable')\r\n return False\r\n else:\r\n if expr[0] == 'PROC' or expr[0] == 'MASPROC':\r\n if id[0] == 'INT':\r\n if not ('PROC' in self.ident[id[1]][1] and 'BOOL' in self.ident[id[1]][1]):\r\n if len(self.vars[id[1]][0]) == 1:\r\n if expr in self.ident[id[1]][0][0]:\r\n self.ident[id[1]][0][0].remove(expr)\r\n return True\r\n else:\r\n return True\r\n else:\r\n print('Its massiv')\r\n return False\r\n else:\r\n print('Error variable')\r\n return False\r\n elif id[0] == 'BOOL':\r\n if not ('PROC' in self.ident[id[1]][1] and 'INT' in self.ident[id[1]][1]):\r\n if len(self.vars[id[1]][0]) == 1:\r\n if expr in self.ident[id[1]][0][0]:\r\n self.ident[id[1]][0][0].remove(expr)\r\n return True\r\n else:\r\n return True\r\n else:\r\n print('Its massiv')\r\n return False\r\n else:\r\n print('Error variable')\r\n return False\r\n elif id[0] == 'PROC':\r\n if not ('BOOL' in self.ident[id[1]][1] and 'INT' in self.ident[id[1]][1]):\r\n if len(self.vars[id[1]][0]) == 1:\r\n if expr in self.ident[id[1]][0][0]:\r\n self.ident[id[1]][0][0].remove(expr)\r\n return True\r\n else:\r\n return True\r\n else:\r\n print('Its massiv')\r\n return False\r\n else:\r\n print('Error variable')\r\n return False\r\n elif id[0] == 'MASINT':\r\n if not ('PROC' in self.ident[id[1]][1] and 'BOOL' in self.ident[id[1]][1]):\r\n if len(id) == 3:\r\n if len(self.ident[id[1]][0]) > self.eval(id[2]): \r\n if expr in self.ident[id[1]][0][self.eval(id[2])]:\r\n self.ident[id[1]][0][0].remove(expr)\r\n return True\r\n else:\r\n return True\r\n else:\r\n print('Going out of the array')\r\n return False\r\n else:\r\n pres = self.vars[id[1]][0]\r\n dims = self.list_dim(id[3])\r\n if isinstance(dims, int):\r\n dims = [dims]\r\n dims.insert(0, 
self.eval(id[2]))\r\n pr = self.list_mas(pres)\r\n if len(pr) == len(dims):\r\n for i in range(len(dims)):\r\n if pr[i] < dims[i]:\r\n print('Going out of the array')\r\n return False\r\n self.eq_mas3(dims, self.ident[id[1]][0], expr)\r\n return True\r\n else:\r\n print('Going out of the array')\r\n return False\r\n else:\r\n print('Error variable')\r\n return False\r\n elif id[0] == 'MASBOOL':\r\n if not ('PROC' in self.ident[id[1]][1] and 'INT' in self.ident[id[1]][1]):\r\n if len(id) == 3:\r\n if len(self.ident[id[1]][0]) > self.eval(id[2]):\r\n if expr in self.ident[id[1]][0][self.eval(id[2])]:\r\n self.ident[id[1]][0][0].remove(expr)\r\n return True\r\n else:\r\n return True\r\n else:\r\n print('Going out of the array')\r\n return False\r\n else:\r\n pres = self.vars[id[1]][0]\r\n dims = self.list_dim(id[3])\r\n if isinstance(dims, int):\r\n dims = [dims]\r\n dims.insert(0, self.eval(id[2]))\r\n pr = self.list_mas(pres)\r\n if len(pr) == len(dims):\r\n for i in range(len(dims)):\r\n if pr[i] < dims[i]:\r\n print('Going out of the array')\r\n return False\r\n self.eq_mas3(dims, self.ident[id[1]][0], expr)\r\n return True\r\n else:\r\n print('Going out of the array')\r\n return False\r\n else:\r\n print('Error variable')\r\n return False\r\n elif id[0] == 'MASPROC':\r\n if not ('INT' in self.ident[id[1]][1] and 'BOOL' in self.ident[id[1]][1]):\r\n if len(id) == 3:\r\n if len(self.ident[id[1]][0]) > self.eval(id[2]):\r\n if expr in self.ident[id[1]][0][self.eval(id[2])]:\r\n self.ident[id[1]][0][0].remove(expr)\r\n return True\r\n else:\r\n return True\r\n else:\r\n print('Going out of the array')\r\n return False\r\n else:\r\n pres = self.vars[id[1]][0]\r\n dims = self.list_dim(id[3])\r\n if isinstance(dims, int):\r\n dims = [dims]\r\n dims.insert(0, self.eval(id[2]))\r\n pr = self.list_mas(pres)\r\n if len(pr) == len(dims):\r\n for i in range(len(dims)):\r\n if pr[i] < dims[i]:\r\n print('Going out of the array')\r\n return False\r\n self.eq_mas3(dims, self.ident[id[1]][0], expr)\r\n return True\r\n else:\r\n print('Going out of the array')\r\n return False\r\n else:\r\n print('Error variable')\r\n return False\r\n \r\n\r\n def evproc(self, expr):\r\n etype = expr[0]\r\n if etype == 'INT':\r\n if expr[1] in self.ident:\r\n return self.ident[expr[1]][0][0]\r\n else:\r\n print('Uninitialized variable')\r\n raise RuntimeError\r\n elif etype == 'BOOLE':\r\n if expr[1] in self.ident:\r\n return self.ident[expr[1]][0][0]\r\n else:\r\n print('Uninitialized variable')\r\n raise RuntimeError\r\n elif etype == 'PROC':\r\n if expr[1] in self.ident:\r\n return self.ident[expr[1]][0][0]\r\n else:\r\n print('Uninitialized variable')\r\n raise RuntimeError\r\n elif etype == 'MASPROC':\r\n if len(expr) == 3:\r\n if expr[1] in self.ident and 'PROC' in self.ident[expr[1]][1]:\r\n return self.ident[expr[1]][0][self.eval(expr[2])]\r\n else:\r\n print('Uninitialized variable')\r\n raise RuntimeError\r\n else:\r\n if expr[1] in self.ident and 'PROC' in self.ident[expr[1]][1]:\r\n dims = self.list_dim(expr[3])\r\n if isinstance(dims, int):\r\n dims = [dims]\r\n dims.insert(0, self.eval(expr[2]))\r\n return self.elem(self.ident[expr[1]][0], dims)\r\n else:\r\n print('Uninitialized variable')\r\n raise RuntimeError\r\n elif etype == 'MASINT':\r\n if len(expr) == 3:\r\n if expr[1] in self.ident:\r\n if isinstance(self.ident[expr[1]][0][self.eval(expr[2])], int):\r\n return self.ident[expr[1]][0][self.eval(expr[2])]\r\n else:\r\n print('This variable is not int')\r\n raise RuntimeError\r\n else:\r\n 
print('Uninitialized variable')\r\n raise RuntimeError\r\n else:\r\n if expr[1] in self.ident:\r\n dims = self.list_dim(expr[3])\r\n if isinstance(dims, int):\r\n dims = [dims]\r\n dims.insert(0, self.eval(expr[2]))\r\n return self.elem(self.ident[expr[1]][0], dims)\r\n else:\r\n print('Uninitialized variable')\r\n raise RuntimeError\r\n elif etype == 'MASBOOL':\r\n if len(expr) == 3:\r\n if expr[1] in self.ident:\r\n return self.ident[expr[1]][0][self.eval(expr[2])]\r\n else:\r\n print('Uninitialized variable')\r\n raise RuntimeError\r\n else:\r\n if expr[1] in self.ident:\r\n dims = self.list_dim(expr[3])\r\n if isinstance(dims, int):\r\n dims = [dims]\r\n dims.insert(0, self.eval(expr[2]))\r\n return self.elem(self.ident, dims)\r\n else:\r\n print('Uninitialized variable')\r\n raise RuntimeError\r\n\r\n\r\n def plus(self, expr):\r\n etype = expr[0]\r\n if etype == 'INTEG':\r\n return expr[1] + 1\r\n elif etype == 'INT':\r\n if expr[1] in self.vars:\r\n self.vars[expr[1]][0][0] += 1\r\n return self.vars[expr[1]][0][0]\r\n else:\r\n print('Uninitialized variable')\r\n raise RuntimeError\r\n elif etype == 'MASINT':\r\n if len(expr) == 3:\r\n if expr[1] in self.vars:\r\n dims = []\r\n dims.insert(0, self.eval(expr[2]))\r\n return self.elem_plus(self.vars[expr[1]][0], dims)\r\n else:\r\n print('Uninitialized variable')\r\n raise RuntimeError\r\n else:\r\n if expr[1] in self.vars:\r\n dims = self.list_dim(expr[3])\r\n if isinstance(dims, int):\r\n dims = [dims]\r\n dims.insert(0, self.eval(expr[2]))\r\n return self.elem_plus(self.vars[expr[1]][0], dims)\r\n else:\r\n print('Uninitialized variable')\r\n raise RuntimeError\r\n\r\n def minus(self, expr):\r\n etype = expr[0]\r\n if etype == 'INTEG':\r\n return expr[1] - 1\r\n elif etype == 'INT':\r\n if expr[1] in self.vars:\r\n self.vars[expr[1]][0][0] -= 1\r\n return self.vars[expr[1]][0][0]\r\n else:\r\n print('Uninitialized variable')\r\n raise RuntimeError\r\n elif etype == 'MASINT':\r\n if len(expr) == 3:\r\n if expr[1] in self.vars:\r\n dims = []\r\n dims.insert(0, self.eval(expr[2]))\r\n return self.elem_minus(self.vars[expr[1]][0], dims)\r\n else:\r\n print('Uninitialized variable')\r\n raise RuntimeError\r\n else:\r\n if expr[1] in self.vars:\r\n dims = self.list_dim(expr[3])\r\n if isinstance(dims, int):\r\n dims = [dims]\r\n dims.insert(0, self.eval(expr[2]))\r\n return self.elem_minus(self.vars[expr[1]][0], dims)\r\n else:\r\n print('Uninitialized variable')\r\n raise RuntimeError\r\n\r\n\r\n def elem(self, mas, dims):\r\n if len(dims) != 1:\r\n a = self.elem(mas[dims[0]], dims[1:])\r\n else:\r\n if isinstance(mas, int):\r\n a = mas\r\n else:\r\n a = mas[dims[0]]\r\n return a\r\n\r\n\r\n def elem_plus(self, mas, dims):\r\n if len(dims) > 1:\r\n a = self.elem_plus(mas[dims[0]], dims[1:])\r\n else:\r\n if isinstance(mas, int):\r\n mas += 1\r\n a = mas\r\n elif type(mas[dims[0]]) != list:\r\n mas[dims[0]] += 1\r\n a = mas[dims[0]]\r\n else:\r\n a = self.elem_plus(mas[dims[0]], [0])\r\n return a\r\n\r\n def elem_minus(self, mas, dims):\r\n if len(dims) > 1:\r\n a = self.elem_plus(mas[dims[0]], dims[1:])\r\n else:\r\n if isinstance(mas, int):\r\n mas -= 1\r\n a = mas\r\n elif type(mas[dims[0]]) != list:\r\n mas[dims[0]] -= 1\r\n a = mas[dims[0]]\r\n else:\r\n a = self.elem_plus(mas[dims[0]], [0])\r\n return a\r\n\r\n\r\n def list_dim(self, dims):\r\n if dims[0] == 'DIMS':\r\n a = []\r\n b = (self.list_dim(dims[1]))\r\n c = (self.eval(dims[2]))\r\n if type(b) == list:\r\n a = b.copy()\r\n else:\r\n a.append(b)\r\n a.append(c)\r\n else:\r\n 
a = (self.eval(dims))\r\n return a\r\n\r\n def make_mas(self, expr, dims):\r\n a = []\r\n if not isinstance(expr, int):\r\n ex = self.eval(expr) + 1\r\n else:\r\n ex = expr + 1\r\n if type(dims) != list:\r\n dim = self.list_dim(dims)\r\n else:\r\n dim = dims\r\n for i in range (ex):\r\n a.append(self.massiv(dim, 0))\r\n return a\r\n\r\n def make_mas1(self, expr, dims):\r\n a = []\r\n if not isinstance(expr, int):\r\n ex = self.eval(expr) + 1\r\n else:\r\n ex = expr + 1\r\n if type(dims) != list:\r\n dim = self.list_dim(dims)\r\n else:\r\n dim = dims\r\n for i in range (ex):\r\n a.append(self.massiv1(dim, 0))\r\n return a\r\n\r\n def massiv1(self, dims, num):\r\n a = []\r\n if type(dims) != list:\r\n dims = [dims]\r\n for i in range (dims[num] + 1):\r\n if num != (len(dims) - 1):\r\n a.append(self.massiv1(dims, num + 1))\r\n else:\r\n a.append([])\r\n return a\r\n \r\n def massiv(self, dims, num):\r\n a = []\r\n if type(dims) != list:\r\n dims = [dims]\r\n for i in range (dims[num] + 1):\r\n if num != (len(dims) - 1):\r\n a.append(self.massiv(dims, num + 1))\r\n else:\r\n a.append(0)\r\n return a\r\n\r\n def list_mas(self, mas):\r\n a = []\r\n a.append(len(mas) - 1)\r\n if type(mas[0]) == list:\r\n b = self.list_mas(mas[0])\r\n for i in range(len(b)):\r\n a.append(b[i])\r\n return a\r\n\r\n def new_list(self, new ,pr):\r\n pres = self.list_mas(pr)\r\n a = []\r\n size = max(len(new), len(pres))\r\n for i in range (size):\r\n if i >= len(pres) or (i < len(new) and new[i] > pres[i]):\r\n a.append(new[i])\r\n else:\r\n a.append(pres[i])\r\n return a\r\n\r\n def make_new_mas(self, new, pres):\r\n coun = len(pres)\r\n for i in range(coun):\r\n if type(new[i]) == list and type(pres[i]) == list:\r\n new[i] = self.make_new_mas(new[i], pres[i])\r\n else:\r\n new[i] = self.make_new_mas2(new[i], pres[i])\r\n return new\r\n\r\n\r\n def make_new_mas2(self, new, pres):\r\n if type(new) == list and type(pres) != list:\r\n new[0] = self.make_new_mas2(new[0], pres)\r\n else:\r\n new = pres\r\n return new\r\n\r\n def eq_mas(self, dims, mas, eq):\r\n if len(dims) != 1:\r\n mas[dims[0]] = self.eq_mas(dims[1:], mas[dims[0]], eq)\r\n elif type(mas[dims[0]]) == list:\r\n mas[dims[0]] = self.eq_mas([0], mas[dims[0]], eq)\r\n else:\r\n mas[dims[0]] = eq\r\n return mas\r\n\r\n def eq_mas1(self, dims, mas, eq):\r\n if len(dims) != 1:\r\n mas[dims[0]] = self.eq_mas1(dims[1:], mas[dims[0]], eq)\r\n elif type(mas[dims[0]]) == list and len(mas[dims[0]]) != 0:\r\n mas[dims[0]] = self.eq_mas1([0], mas[dims[0]], eq)\r\n else:\r\n mas[dims[0]] = eq\r\n return mas\r\n\r\n def eq_mas2(self, dims, mas, eq):\r\n if len(dims) != 1:\r\n mas[dims[0]] = self.eq_mas2(dims[1:], mas[dims[0]], eq)\r\n else:\r\n mas[dims[0]].append(eq)\r\n return mas\r\n\r\n def eq_mas3(self, dims, mas, eq):\r\n if len(dims) != 1:\r\n mas[dims[0]] = self.eq_mas3(dims[1:], mas[dims[0]], eq)\r\n else:\r\n if eq in mas[dims[0]]:\r\n mas[dims[0]].remove(eq)\r\n return mas\r\n\r\n def eq2(self, mas, eq):\r\n if type(mas) == list:\r\n mas[0] = self.eq2(mas[0], eq)\r\n else:\r\n mas = eq\r\n return mas\r\n\r\n def test_rec(self, ex1, ex2):\r\n if ex1 == ex2:\r\n return False\r\n if ex1[0] == 'PROC' or ex1[0] == 'INT' or ex1[0] == 'BOOL':\r\n id1 = self.ident[ex1[1]][0][0]\r\n else:\r\n if len(ex1) == 3:\r\n id1 = self.ident[ex1[1]][0][self.eval(ex1[2])]\r\n else:\r\n dims = self.list_dim(ex1[3])\r\n if isinstance(dims, int):\r\n dims = [dims]\r\n dims.insert(0, self.eval(ex1[2]))\r\n id1 = self.elem(self.ident[ex1[1]][0], dims)\r\n if ex2[0] == 'PROC':\r\n id2 = 
self.ident[ex2[1]][0][0]\r\n else:\r\n if len(ex2) == 3:\r\n id2 = self.ident[ex2[1]][0][self.eval(ex2[2])]\r\n else:\r\n dims = self.list_dim(ex2[3])\r\n if isinstance(dims, int):\r\n dims = [dims]\r\n dims.insert(0, self.eval(ex2[2]))\r\n id2 = self.elem(self.ident[ex2[1]][0], dims)\r\n if len(id1) == 0 and len(id2) == 0:\r\n return True\r\n elif len(id1) != 0 and len(id2) == 0:\r\n return True\r\n else:\r\n for i in range(len(id2)):\r\n if self.test_rec(ex1, id2[i]) == True:\r\n return True\r\n else:\r\n return False\r\n\r\n\r\n def ex(self, line):\r\n op = line[0]\r\n if op == 'ASSINT':\r\n if len(line) == 3:\r\n if not (line[1] in self.vars) or 'BOOL' in self.vars[line[1]][1] or 'PROC' in self.vars[line[1]][1]:\r\n a = []\r\n self.proced(line[2])\r\n a.append(self.eval(line[2]))\r\n b = []\r\n b.append(a)\r\n b.append('INT')\r\n self.vars[line[1]] = b\r\n c = []\r\n c.append([])\r\n d = []\r\n d.append(c)\r\n d.append('INT')\r\n self.ident[line[1]] = d\r\n else:\r\n self.proced(line[2])\r\n self.vars[line[1]][0][0] = self.eval(line[2])\r\n elif len(line) == 4:\r\n if not(line[1] in self.vars) or 'BOOL' in self.vars[line[1]][1] or 'PROC' in self.vars[line[1]][1]:\r\n a = []\r\n c = []\r\n j = self.eval(line[2])\r\n for i in range(j + 1):\r\n if i == j:\r\n a.append(self.eval(line[3]))\r\n c.append([])\r\n else:\r\n a.append(0)\r\n c.append([])\r\n self.proced(line[2])\r\n self.proced(line[3])\r\n b = []\r\n b.append(a)\r\n b.append('INT')\r\n self.vars[line[1]] = b\r\n d = []\r\n d.append(c)\r\n d.append('INT')\r\n self.ident[line[1]] = d\r\n else:\r\n if len(self.vars[line[1]][0]) >= self.eval(line[2]):\r\n self.proced(line[2])\r\n self.proced(line[3])\r\n self.eq2(self.vars[line[1]][0][self.eval(line[2])], self.eval(line[3]))\r\n else:\r\n a = []\r\n d = []\r\n self.proced(line[2])\r\n j = self.eval(line[2])\r\n c = len(self.vars[line[1]][0])\r\n for i in range(j + 1):\r\n if i < c:\r\n a.append(self.vars[line[1]][0][i])\r\n d.append(self.ident[line[1]][0][i])\r\n elif i == j:\r\n self.proced(line[3])\r\n a.append(self.eval(line[3]))\r\n d.append([])\r\n else:\r\n a.append(0)\r\n d.append([])\r\n b = []\r\n b.append(a)\r\n b.append('INT')\r\n self.vars[line[1]] = b\r\n e = []\r\n e.append(d)\r\n e.append('INT')\r\n self.ident[line[1]] = e\r\n else:\r\n if not(line[1] in self.vars) or 'BOOL' in self.vars[line[1]][1] or 'PROC' in self.vars[line[1]][1]:\r\n a = self.make_mas(line[2], line[3])\r\n c = self.make_mas1(line[2], line[3])\r\n self.proced(line[2])\r\n self.proced(line[3])\r\n self.proced(line[4])\r\n dims = self.list_dim(line[3])\r\n if isinstance(dims, int):\r\n dims = [dims]\r\n dims.insert(0, self.eval(line[2]))\r\n a = self.eq_mas(dims, a, self.eval(line[4]))\r\n c = self.eq_mas1(dims, c, [])\r\n b = []\r\n b.append(a)\r\n b.append('INT')\r\n self.vars[line[1]] = b\r\n d = []\r\n d.append(c)\r\n d.append('INT')\r\n self.ident[line[1]] = d\r\n else:\r\n self.proced(line[2])\r\n self.proced(line[3])\r\n self.proced(line[4])\r\n pres = self.vars[line[1]][0]\r\n dims = self.list_dim(line[3])\r\n if isinstance(dims, int):\r\n dims = [dims]\r\n dims.insert(0, self.eval(line[2]))\r\n list = self.new_list(dims, pres)\r\n new = self.make_mas(list[0], list[1:])\r\n a = self.make_new_mas(new, pres)\r\n a = self.eq_mas(dims, a, self.eval(line[4]))\r\n b = []\r\n b.append(a)\r\n b.append('INT')\r\n self.vars[line[1]] = b\r\n pres1 = self.ident[line[1]][0]\r\n new1 = self.make_mas1(list[0], list[1:])\r\n c = self.make_new_mas(new1, pres1)\r\n self.eq_mas1(dims, c, [])\r\n d = []\r\n 
d.append(c)\r\n d.append('INT')\r\n self.ident[line[1]] = d\r\n elif op == 'ASSBOOL':\r\n if len(line) == 3:\r\n if not (line[1] in self.vars):\r\n a = []\r\n self.proced(line[2])\r\n a.append(self.eval(line[2]))\r\n b = []\r\n b.append(a)\r\n b.append('BOOL')\r\n self.vars[line[1]] = b\r\n c = []\r\n c.append([])\r\n d = []\r\n d.append(c)\r\n d.append('BOOL')\r\n self.ident[line[1]] = d\r\n else:\r\n self.proced(line[2])\r\n self.vars[line[1]][0][0] = self.eval(line[2])\r\n elif len(line) == 4:\r\n if not(line[1] in self.vars) or 'INT' in self.vars[line[1]][1] or 'PROC' in self.vars[line[1]][1]:\r\n a = []\r\n c = []\r\n j = self.eval(line[2])\r\n for i in range(j + 1):\r\n if i == self.eval(line[2]):\r\n a.append(self.eval(line[3]))\r\n else:\r\n a.append(0)\r\n c.append([])\r\n self.proced(line[2])\r\n self.proced(line[3])\r\n b = []\r\n b.append(a)\r\n b.append('BOOL')\r\n self.vars[line[1]] = b\r\n d = []\r\n d.append(c)\r\n d.append('BOOL')\r\n self.ident[line[1]] = d\r\n else:\r\n if len(self.vars[line[1]][0]) >= self.eval(line[2]):\r\n self.proced(line[2])\r\n self.proced(line[3])\r\n self.vars[line[1]][0][self.eval(line[2])] = self.eval(line[3])\r\n else:\r\n a = []\r\n d = []\r\n self.proced(line[2])\r\n j = self.eval(line[2])\r\n c = len(self.vars[line[1]][0])\r\n for i in range(j + 1):\r\n if i < c:\r\n a.append(self.vars[line[1]][0][i])\r\n d.append(self.ident[line[1]][0][i])\r\n elif i == j:\r\n self.proced(line[3])\r\n a.append(self.eval(line[3]))\r\n d.append([])\r\n else:\r\n a.append(0)\r\n d.append([])\r\n b = []\r\n b.append(a)\r\n b.append('BOOL')\r\n self.vars[line[1]] = b\r\n e = []\r\n e.append(d)\r\n e.append('BOOL')\r\n self.ident[line[1]] = e\r\n else:\r\n if not(line[1] in self.vars) or 'INT' in self.vars[line[1]][1] or 'PROC' in self.vars[line[1]][1]:\r\n a = self.make_mas(line[2], line[3])\r\n c = self.make_mas1(line[2], line[3])\r\n dims = self.list_dim(line[3])\r\n if isinstance(dims, int):\r\n dims = [dims]\r\n dims.insert(0, self.eval(line[2]))\r\n self.proced(line[2])\r\n self.proced(line[3])\r\n self.proced(line[4])\r\n a = self.eq_mas(dims, a, self.eval(line[4]))\r\n c = self.eq_mas1(dims, c, [])\r\n b = []\r\n b.append(a)\r\n b.append('BOOL')\r\n self.vars[line[1]] = b\r\n d = []\r\n d.append(c)\r\n d.append('BOOL')\r\n self.ident[line[1]] = d\r\n else:\r\n self.proced(line[2])\r\n self.proced(line[3])\r\n self.proced(line[4])\r\n pres = self.vars[line[1]][0]\r\n dims = self.list_dim(line[3])\r\n if isinstance(dims, int):\r\n dims = [dims]\r\n dims.insert(0, self.eval(line[2]))\r\n list = self.new_list(dims, pres)\r\n new = self.make_mas(list[0], list[1:])\r\n a = self.make_new_mas(new, pres)\r\n a = self.eq_mas(dims, a, self.eval(line[4])) \r\n b = []\r\n b.append(a)\r\n b.append('BOOL')\r\n self.vars[line[1]] = b\r\n pres1 = self.ident[line[1]][0]\r\n new1 = self.make_mas1(list[0], list[1:])\r\n c = self.make_new_mas(new1, pres1)\r\n self.eq_mas1(dims, c, [])\r\n d = []\r\n d.append(c)\r\n d.append('BOOL')\r\n self.ident[line[1]] = d\r\n elif op == 'ASSPROC':\r\n if len(line) == 3:\r\n if not (line[1] in self.vars) or 'INT' in self.vars[line[1]][1] or 'BOOL' in self.vars[line[1]][1]:\r\n a = []\r\n self.proced(line[2])\r\n a.append(line[2])\r\n b = []\r\n b.append(a)\r\n b.append('PROC')\r\n self.vars[line[1]] = b\r\n c = []\r\n c.append([])\r\n d = []\r\n d.append(c)\r\n d.append('PROC')\r\n self.ident[line[1]] = d\r\n else:\r\n self.proced(line[2])\r\n self.vars[line[1]][0][0] = line[2]\r\n elif len(line) == 4:\r\n if not (line[1] in self.vars) or 
'INT' in self.vars[line[1]][1] or 'BOOL' in self.vars[line[1]][1]:\r\n a = []\r\n c = []\r\n self.proced(line[2])\r\n j = self.eval(line[2])\r\n for i in range(j + 1):\r\n if i == j:\r\n a.append(line[3])\r\n else:\r\n a.append(0)\r\n c.append([])\r\n self.proced(line[3])\r\n b = []\r\n b.append(a)\r\n b.append('PROC')\r\n self.vars[line[1]] = b\r\n d = []\r\n d.append(c)\r\n d.append('PROC')\r\n self.ident[line[1]] = d\r\n else:\r\n if len(self.vars[line[1]][0]) >= self.eval(line[2]):\r\n self.proced(line[2])\r\n self.proced(line[3])\r\n self.vars[line[1]][0][self.eval(line[2])] = line[3]\r\n else:\r\n a = []\r\n d = []\r\n self.proced(line[2])\r\n self.proced(line[3])\r\n j = self.eval(line[2])\r\n c = len(self.vars[line[1]][0])\r\n for i in range(j + 1):\r\n if i < c:\r\n a.append(self.vars[line[1]][0][i])\r\n d.append(self.ident[line[1]][0][i])\r\n elif i == j:\r\n a.append(line[3])\r\n d.append([])\r\n else:\r\n a.append(0)\r\n d.append([])\r\n b = []\r\n b.append(a)\r\n b.append('PROC')\r\n self.vars[line[1]] = b\r\n e = []\r\n e.append(d)\r\n e.append('PROC')\r\n self.ident[line[1]] = e\r\n else:\r\n if not (line[1] in self.vars) or 'INT' in self.vars[line[1]][1] or 'BOOL' in self.vars[line[1]][1]:\r\n a = self.make_mas(line[2], line[3])\r\n c = self.make_mas1(line[2], line[3])\r\n self.proced(line[2])\r\n self.proced(line[3])\r\n self.proced(line[4])\r\n dims = self.list_dim(line[3])\r\n if isinstance(dims, int):\r\n dims = [dims]\r\n dims.insert(0, self.eval(line[2]))\r\n a = self.eq_mas(dims, a, line[4])\r\n c = self.eq_mas1(dims, c, [])\r\n b = []\r\n b.append(a)\r\n b.append('PROC')\r\n self.vars[line[1]] = b\r\n d = []\r\n d.append(c)\r\n d.append('PROC')\r\n self.ident[line[1]] = d\r\n else:\r\n self.proced(line[2])\r\n self.proced(line[3])\r\n self.proced(line[4])\r\n pres = self.vars[line[1]][0]\r\n dims = self.list_dim(line[3])\r\n if isinstance(dims, int):\r\n dims = [dims]\r\n dims.insert(0, self.eval(line[2]))\r\n list = self.new_list(dims, pres)\r\n new = self.make_mas(list[0], list[1:])\r\n a = self.make_new_mas(new, pres)\r\n a = self.eq_mas(dims, a, line[4]) \r\n b = []\r\n b.append(a)\r\n b.append('PROC')\r\n self.vars[line[1]] = b\r\n pres1 = self.ident[line[1]][0]\r\n new1 = self.make_mas1(list[0], list[1:])\r\n c = self.make_new_mas(new1, pres1)\r\n self.eq_mas1(dims, c, [])\r\n d = []\r\n d.append(c)\r\n d.append('PROC')\r\n self.ident[line[1]] = d\r\n elif op == 'PRINT':\r\n if line[1] in self.vars:\r\n print (self.vars[line[1]][0])\r\n print(self.ident[line[1]][0])\r\n else:\r\n print('Uninitialized variable', line[1])\r\n elif op == 'PRINTM':\r\n for i in range(len(self.vars[line[1]][0])):\r\n for j in range(len(self.vars[line[1]][0][i])):\r\n print (self.vars[line[1]][0][i][j], end = '')\r\n print()\r\n elif op == 'BOOLE':\r\n if not (line[1] in self.vars) or 'INT' in self.vars[line[1]][1]:\r\n a = [0]\r\n b = []\r\n b.append(a)\r\n b.append('BOOL')\r\n c = []\r\n c.append([])\r\n d = []\r\n d.append(c)\r\n d.append('BOOL')\r\n self.vars[line[1]] = b\r\n self.ident[line[1]] = d\r\n else:\r\n self.proced(line)\r\n elif op == 'INT':\r\n if not (line[1] in self.vars) or 'BOOL' in self.vars[line[1]][1]:\r\n a = [0]\r\n b = []\r\n b.append(a)\r\n b.append('INT')\r\n c = []\r\n c.append([])\r\n d = []\r\n d.append(c)\r\n d.append('INT')\r\n self.vars[line[1]] = b\r\n self.ident[line[1]] = d\r\n else:\r\n self.proced(line)\r\n elif op == 'PROC':\r\n if not (line[1] in self.vars) or 'BOOL' in self.vars[line[1]][1] or 'INT' in self.vars[line[1]][1] or 
len(self.vars[line[1]][0]) == 0:\r\n a = []\r\n b = []\r\n a.append(0)\r\n b.append(a)\r\n b.append('PROC')\r\n c = []\r\n c.append([])\r\n d = []\r\n d.append(c)\r\n d.append('PROC')\r\n self.vars[line[1]] = b\r\n self.ident[line[1]] = d\r\n else:\r\n self.proced(line)\r\n self.ex(self.vars[line[1]][0][0])\r\n elif op == 'MASINT':\r\n if len(line) == 3:\r\n if not(line[1] in self.vars) or 'BOOL' in self.vars[line[1]][1] or 'PROC' in self.vars[line[1]][1]:\r\n self.proced(line[2])\r\n i = self.eval(line[2]) + 1\r\n a = []\r\n c = []\r\n for j in range (i):\r\n a.append(0)\r\n c.append([])\r\n b = []\r\n b.append(a)\r\n b.append('INT')\r\n d = []\r\n d.append(c)\r\n d.append('INT')\r\n self.vars[line[1]] = b\r\n self.ident[line[1]] = d\r\n else:\r\n if len(self.vars[line[1]][0]) < self.eval(line[2]):\r\n self.proced(line[2])\r\n j = self.eval(line[2]) - len(self.vars[line[1]][0])\r\n for i in range(j + 1):\r\n self.vars[line[1]][0].append(0)\r\n self.ident[line[1]][0].append([])\r\n else:\r\n if not(line[1] in self.vars) or 'BOOL' in self.vars[line[1]][1] or 'PROC' in self.vars[line[1]][1]:\r\n a = self.make_mas(line[2], line[3])\r\n c = self.make_mas1(line[2], line[3])\r\n self.proced(line[2])\r\n self.proced(line[3])\r\n b = []\r\n b.append(a)\r\n b.append('INT')\r\n self.vars[line[1]] = b\r\n d = []\r\n d.append(c)\r\n d.append('INT')\r\n self.ident[line[1]] = d\r\n else:\r\n self.proced(line[2])\r\n self.proced(line[3])\r\n pres = self.vars[line[1]][0]\r\n dims = self.list_dim(line[3])\r\n if isinstance(dims, int):\r\n dims = [dims]\r\n dims.insert(0, self.eval(line[2]))\r\n list = self.new_list(dims, pres)\r\n new = self.make_mas(list[0], list[1:])\r\n a = self.make_new_mas(new, pres)\r\n b = []\r\n b.append(a)\r\n b.append('INT')\r\n self.vars[line[1]] = b\r\n pres1 = self.ident[line[1]][0]\r\n new1 = self.make_mas1(list[0], list[1:])\r\n c = self.make_new_mas(new1, pres1)\r\n d = []\r\n d.append(c)\r\n d.append('INT')\r\n self.ident[line[1]] = d\r\n elif op =='MASBOOL':\r\n if len(line) == 3:\r\n if not(line[1] in self.vars) or 'INT' in self.vars[line[1]][1] or 'PROC' in self.vars[line[1]][1]:\r\n i = self.eval(line[2]) + 1\r\n self.proced(line[2])\r\n a = []\r\n c = []\r\n for j in range (i):\r\n a.append(0)\r\n c.append([])\r\n b = []\r\n b.append(a)\r\n b.append('BOOL')\r\n self.vars[line[1]] = b\r\n d = []\r\n d.append(c)\r\n d.append('BOOL')\r\n self.ident[line[1]] = d\r\n else:\r\n if len(self.vars[line[1]][0]) < self.eval(line[2]):\r\n self.proced(line[2])\r\n j = self.eval(line[2]) - len(self.vars[line[1]][0])\r\n for i in range(j + 1):\r\n self.vars[line[1]][0].append(0)\r\n self.ident[line[1]][0].append([])\r\n else:\r\n if not(line[1] in self.vars) or 'INT' in self.vars[line[1]][1]:\r\n self.proced(line[2])\r\n self.proced(line[3])\r\n a = self.make_mas(line[2], line[3])\r\n c = self.make_mas1(line[2], line[3])\r\n b = []\r\n b.append(a)\r\n b.append('BOOL')\r\n self.vars[line[1]] = b\r\n d = []\r\n d.append(c)\r\n d.append('BOOL')\r\n self.ident[line[1]] = b\r\n else:\r\n self.proced(line[2])\r\n self.proced(line[3])\r\n pres = self.vars[line[1]][0]\r\n dims = self.list_dim(line[3])\r\n if isinstance(dims, int):\r\n dims = [dims]\r\n dims.insert(0, self.eval(line[2]))\r\n list = self.new_list(dims, pres)\r\n new = self.make_mas(list[0], list[1:])\r\n a = self.make_new_mas(new, pres)\r\n b = []\r\n b.append(a)\r\n b.append('BOOL')\r\n self.vars[line[1]] = b\r\n pres1 = self.ident[line[1]][0]\r\n new1 = self.make_mas1(list[0], list[1:])\r\n c = self.make_new_mas(new1, 
pres1)\r\n d = []\r\n d.append(c)\r\n d.append('INT')\r\n self.ident[line[1]] = d\r\n elif op == 'MASPROC':\r\n if len(line) == 3:\r\n if not (line[1] in self.vars) or 'INT' in self.vars[line[1]][1] or 'BOOL' in self.vars[line[1]][1]:\r\n self.proced(line[2])\r\n i = self.eval(line[2]) + 1\r\n a = []\r\n c = []\r\n for j in range (i):\r\n a.append(0)\r\n c.append([])\r\n b = []\r\n b.append(a)\r\n b.append('PROC')\r\n self.vars[line[1]] = b\r\n d = []\r\n d.append(c)\r\n d.append('PROC')\r\n self.ident[line[1]] = d\r\n else:\r\n if len(self.vars[line[1]][0]) < self.eval(line[2]):\r\n self.proced(line[2])\r\n j = self.eval(line[2]) - len(self.vars[line[1]][0])\r\n for i in range(j + 1):\r\n self.vars[line[1]][0].append(0)\r\n self.ident[line[1]][0].append([])\r\n else:\r\n l = self.vars[line[1]][0][self.eval(line[2])]\r\n if l[0] == 'STATGROUP':\r\n self.proced(line[2])\r\n self.ex(self.vars[line[1]][0][self.eval(line[2])])\r\n else:\r\n print('It is not procedure')\r\n else:\r\n if not (line[1] in self.vars) or 'INT' in self.vars[line[1]][1] or 'BOOL' in self.vars[line[1]][1]:\r\n a = self.make_mas(line[2], line[3])\r\n c = self.make_mas1(line[2], line[3])\r\n self.proced(line[2])\r\n self.proced(line[3])\r\n b = []\r\n b.append(a)\r\n b.append('PROC')\r\n self.vars[line[1]] = b\r\n d = []\r\n d.append(c)\r\n d.append('PROC')\r\n self.ident[line[1]] = d\r\n else:\r\n self.proced(line[2])\r\n self.proced(line[3])\r\n pres = self.vars[line[1]][0]\r\n dims = self.list_dim(line[3])\r\n if isinstance(dims, int):\r\n dims = [dims]\r\n dims.insert(0, self.eval(line[2]))\r\n list = self.new_list(dims, pres)\r\n new = self.make_mas(list[0], list[1:])\r\n a = self.make_new_mas(new, pres)\r\n b = []\r\n b.append(a)\r\n b.append('PROC')\r\n self.vars[line[1]] = b\r\n pres1 = self.ident[line[1]][0]\r\n new1 = self.make_mas1(list[0], list[1:])\r\n c = self.make_new_mas(new1, pres1)\r\n d = []\r\n d.append(c)\r\n d.append('PROC')\r\n self.ident[line[1]] = d\r\n l = self.elem(self.vars[line[1]][0], dims)\r\n if l != 0:\r\n if l[0] == 'STATGROUP':\r\n self.ex(l)\r\n else:\r\n print('It is list, not procedure, or procedure is empty')\r\n elif op == 'BINOP':\r\n if line[1] == ',#':\r\n self.proced(line[2])\r\n self.plus(line[2])\r\n else:\r\n self.proced(line[2])\r\n self.minus(line[2])\r\n elif op == 'STATGROUP':\r\n self.ex(line[1])\r\n if len(line) == 3:\r\n self.ex(line[2])\r\n elif op == 'EQPROC':\r\n if len(line) == 3:\r\n self.proced(line[2])\r\n stat = self.eval(line[2])\r\n proc = self.evproc(line[2])\r\n if not (line[1] in self.vars) or 'INT' in self.vars[line[1]][1] or 'BOOL' in self.vars[line[1]][1]:\r\n a = []\r\n a.append(stat)\r\n b = []\r\n b.append(a)\r\n b.append('PROC')\r\n self.vars[line[1]] = b\r\n c = []\r\n c.append(proc)\r\n d = []\r\n d.append(c)\r\n d.append('PROC')\r\n self.ident[line[1]] = d\r\n else:\r\n self.vars[line[1]][0][0] = stat\r\n self.ident[line[1]][0][0] = proc\r\n elif len(line) == 4:\r\n self.proced(line[2])\r\n self.proced(line[3])\r\n stat = self.eval(line[3])\r\n proc = self.evproc(line[3])\r\n if not (line[1] in self.vars) or 'INT' in self.vars[line[1]][1] or 'BOOL' in self.vars[line[1]][1]:\r\n a = []\r\n c = [] \r\n j = self.eval(line[2])\r\n for i in range(j + 1):\r\n if i == j:\r\n a.append(stat)\r\n c.append(proc)\r\n else:\r\n a.append(0)\r\n c.append([])\r\n b = []\r\n b.append(a)\r\n b.append('PROC')\r\n self.vars[line[1]] = b\r\n d = []\r\n d.append(c)\r\n d.append('PROC')\r\n self.ident[line[1]] = d\r\n else:\r\n if len(self.vars[line[1]]) >= 
self.eval(line[2]):\r\n self.vars[line[1]][0][self.eval(line[2])] = stat\r\n self.ident[line[1]][0][self.eval(line[2])] = proc\r\n else:\r\n a = []\r\n d = []\r\n j = self.eval(line[2])\r\n c = len(self.vars[line[1]][0])\r\n for i in range(j + 1):\r\n if i < c:\r\n a.append(self.vars[line[1]][0][i])\r\n d.append(self.ident[line[1]][0][i])\r\n elif i == j:\r\n a.append(stat)\r\n d.append(proc)\r\n else:\r\n a.append(0)\r\n d.append([])\r\n b = []\r\n b.append(a)\r\n b.append('PROC')\r\n self.vars[line[1]] = b\r\n e = []\r\n e.append(d)\r\n e.append('PROC')\r\n self.ident[line[1]] = e\r\n else:\r\n self.proced(line[2])\r\n self.proced(line[3])\r\n self.proced(line[4])\r\n stat = self.eval(line[4])\r\n proc = self.evproc(line[4])\r\n if not (line[1] in self.vars) or 'INT' in self.vars[line[1]][1] or 'BOOL' in self.vars[line[1]][1]:\r\n a = self.make_mas(line[2], line[3])\r\n dims = self.list_dim(line[3])\r\n if isinstance(dims, int):\r\n dims = [dims]\r\n dims.insert(0, self.eval(line[2]))\r\n a = self.eq_mas(dims, a, stat)\r\n b = []\r\n b.append(a)\r\n b.append('PROC')\r\n self.vars[line[1]] = b\r\n c = self.make_mas1(line[2], line[3])\r\n dims = self.list_dim(line[3])\r\n if isinstance(dims, int):\r\n dims = [dims]\r\n dims.insert(0, self.eval(line[2]))\r\n c = self.eq_mas1(dims, c, proc)\r\n d = []\r\n d.append(c)\r\n d.append('PROC')\r\n self.ident[line[1]] = d\r\n else:\r\n pres = self.vars[line[1]][0]\r\n dims = self.list_dim(line[3])\r\n if isinstance(dims, int):\r\n dims = [dims]\r\n dims.insert(0, self.eval(line[2]))\r\n list = self.new_list(dims, pres)\r\n new = self.make_mas(list[0], list[1:])\r\n a = self.make_new_mas(new, pres)\r\n a = self.eq_mas(dims, a, stat) \r\n b = []\r\n b.append(a)\r\n b.append('PROC')\r\n self.vars[line[1]] = b\r\n pres1 = self.ident[line[1]][0]\r\n new1 = self.make_mas1(list[0], list[1:])\r\n c = self.make_new_mas(new1, pres1)\r\n c = self.eq_mas1(dims, c, proc) \r\n d = []\r\n d.append(c)\r\n d.append('PROC')\r\n self.ident[line[1]] = d\r\n elif op == 'IF':\r\n self.proced(line[1])\r\n if self.eval(line[1]) == True:\r\n self.ex(line[2])\r\n elif op == 'IDENT':\r\n if len(line) == 5:\r\n if line[1] == '.':\r\n lin1 = ('BOOLE', line[2])\r\n elif line[1] == ',':\r\n lin1 = ('INT', line[2])\r\n elif line[1] == '$':\r\n lin1 = ('PROC', line[2])\r\n if self.test_rec(lin1, line[4]) == False:\r\n print('Recursion')\r\n raise RuntimeError\r\n self.proced(line[4])\r\n if line[3] == '@':\r\n return self.bind(lin1, line[4])\r\n else:\r\n return self.unbind(lin1, line[4])\r\n elif len(line) == 6:\r\n if line[1] == '.':\r\n lin1 = ('MASBOOL', line[2], line[3])\r\n elif line[1] == ',':\r\n lin1 = ('MASINT', line[2], line[3])\r\n elif line[1] == '$':\r\n lin1 = ('MASPROC', line[2], line[3])\r\n if self.test_rec(lin1, line[5]) == False:\r\n print('Recursion')\r\n raise RuntimeError\r\n self.proced(line[5])\r\n if line[4] == '@':\r\n return self.bind(lin1, line[5])\r\n else:\r\n return self.unbind(lin1, line[5])\r\n else:\r\n if line[1] == '.':\r\n lin1 = ('MASBOOL', line[2], line[3], line[4])\r\n elif line[1] == ',':\r\n lin1 = ('MASINT', line[2], line[3], line[4])\r\n elif line[1] == '$':\r\n lin1 = ('MASPROC', line[2], line[3], line[4])\r\n if self.test_rec(lin1, line[6]) == False:\r\n print('Recursion')\r\n raise RuntimeError\r\n self.proced(line[6])\r\n if line[5] == '@':\r\n return self.bind(lin1, line[6])\r\n else:\r\n return self.unbind(lin1, line[6])\r\n elif op == 'LABEL':\r\n self.label.append(line[1])\r\n elif op == 'GOLABEL':\r\n if self.eval(line[1]) == 
True:\r\n if line[2] in self.label:\r\n flag = 0\r\n for lin in self.prog.values():\r\n if self.maze['success'] == 1:\r\n break\r\n elif self.maze['success'] == -1:\r\n break\r\n if line is None:\r\n continue\r\n if flag == 0:\r\n if lin != ('LABEL', line[2]):\r\n continue\r\n flag = 1\r\n continue\r\n else:\r\n if lin != line:\r\n self.ex(lin)\r\n else:\r\n self.ex(lin)\r\n break\r\n elif op == 'MOVE':\r\n return self.eval(line)\r\n\r\n\r\n \r\n def initmaze(self):\r\n self.maze['coord'] = [4, 4, 0]\r\n b = []\r\n a = [1,1,1,1,1,1,1,1,1,1]\r\n b.append(a)\r\n a = [1,0,1,0,0,1,1,0,0,1]\r\n b.append(a)\r\n a = [1,0,0,0,0,1,1,0,0,1]\r\n b.append(a)\r\n a = [1,0,0,0,0,1,1,0,0,1]\r\n b.append(a)\r\n a = [1,0,0,0,0,1,1,0,0,1]\r\n b.append(a)\r\n a = [1,0,0,0,0,0,0,0,0,1]\r\n b.append(a)\r\n a = [1,0,0,0,0,0,0,0,0,1]\r\n b.append(a)\r\n a = [1,1,1,1,1,0,0,5,0,1]\r\n b.append(a)\r\n self.maze['maze'] = b\r\n self.maze['success'] = 0\r\n self.maze['tp'] = 3\r\n\r\n\r\n def run(self):\r\n self.vars = {}\r\n self.ident = {}\r\n self.label = []\r\n self.maze = {}\r\n self.initmaze()\r\n for line in self.prog.values():\r\n if self.maze['success'] == 1:\r\n break\r\n elif self.maze['success'] == -1:\r\n break\r\n if line is None:\r\n continue\r\n # print(self.vars)\r\n self.ex(line)" }, { "alpha_fraction": 0.4618556797504425, "alphanum_fraction": 0.4639175236225128, "avg_line_length": 17.426666259765625, "blob_id": "bb1b128aa84a94caf11ee10d4fc957159130e29e", "content_id": "143cf17a433850cd14e864ead9faf5554cc81ed1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1455, "license_type": "no_license", "max_line_length": 89, "num_lines": 75, "path": "/lexer2.py", "repo_name": "AstroMaxx/Interpreter", "src_encoding": "UTF-8", "text": "from ply import lex\r\n\r\nkeywords = (\r\n\t'T', 'F', 'PLEASE', 'EQ', 'PRINT', 'PRINTMAT', 'MO', 'NP', 'MF', 'MB', 'MR', 'ML', 'TP',\r\n)\r\n\r\ntokens = keywords + (\r\n\t'EQUALS', 'PLUS', 'MINUS', 'INTEGER', 'BOOLEAN', 'LPAREN', 'RPAREN',\r\n 'COMMA', 'POINT', 'LSQPAREN', 'RSQPAREN', 'ID', \r\n 'NEWLINE', 'DOLLAR', 'BEGIN', 'END', 'ERROR', 'DOG', 'PIERCE',\r\n 'WAVE', 'DOUB', 'DASH', 'PERC',\r\n)\r\n\r\nt_ignore = ' \\t'\r\n\r\nt_EQUALS = r'\\<\\-'\r\nt_PLUS = r'\\,\\#'\r\nt_MINUS = r'\\,\\*'\r\nt_LPAREN = r'\\('\r\nt_RPAREN = r'\\)'\r\nt_COMMA = r'\\,'\r\nt_POINT = r'\\.'\r\nt_LSQPAREN = r'\\['\r\nt_RSQPAREN = r'\\]'\r\nt_DOLLAR = r'\\$'\r\nt_WAVE = r'~'\r\nt_DOG = r'\\@'\r\nt_PERC = r'\\%'\r\nt_PIERCE = r'\\.\\#'\r\nt_DOUB = r'\\:'\r\nt_DASH = r'\\-'\r\nt_BEGIN = r'\\{'\r\nt_END = r'\\}'\r\n\r\n\r\ndef t_INTEGER(t):\r\n r'\\d+'\r\n try:\r\n t.value = int(t.value)\r\n except ValueError:\r\n print(\"Integer value is wrong %d\", t.value)\r\n t.value = 0\r\n return t\r\n\r\n\r\ndef t_NEWLINE(t):\r\n r'\\n'\r\n t.lexer.lineno += 1\r\n t.lexer.lexposition = t.lexer.lexpos\r\n return t\r\n\r\ndef t_ID(t):\r\n r'[a-zA-Z]+'\r\n if t.value in keywords:\r\n t.type = t.value\r\n if t.value == 'T' or t.value == 'F':\r\n t.type = 'BOOLEAN'\r\n return t\r\n\r\ndef t_ANY_error(t):\r\n t.type = 'ERROR'\r\n t.value = 4\r\n return t\r\n\r\n\r\nlex.lex()\r\n\r\n'''while True:\r\n lexer.input(input())\r\n\r\n while True:\r\n tok = lexer.token()\r\n if not tok:\r\n break\r\n print(tok)'''" }, { "alpha_fraction": 0.5437454581260681, "alphanum_fraction": 0.564507782459259, "avg_line_length": 30.384614944458008, "blob_id": "ee3087ea16d18e920b59be8da1c3e56e153c27eb", "content_id": "c78aaa4ceafac44847fa1ad06af6fdf96b34deaf", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9681, "license_type": "no_license", "max_line_length": 124, "num_lines": 299, "path": "/parser2.py", "repo_name": "AstroMaxx/Interpreter", "src_encoding": "UTF-8", "text": "from ply import *\r\nimport lexer2\r\n\r\ntokens = lexer2.tokens\r\n\r\n\r\ndef p_program(p):\r\n '''program : program statement\r\n | program statgroup\r\n | statement'''\r\n if len(p) == 2 and p[1]:\r\n p.counter = 0\r\n p[0] = {}\r\n p[0][p.counter] = p[1]\r\n p.counter += 1\r\n # print(p[0])\r\n elif len(p) == 3:\r\n p[0] = p[1]\r\n if not p[0]:\r\n p[0] = {}\r\n if p[2]:\r\n stat = p[2]\r\n p[0][p.counter] = stat\r\n p.counter += 1\r\n\r\n\r\ndef p_statgroup(p):\r\n 'statgroup : statement'\r\n p[0] = ('STATGROUP', p[1])\r\n\r\ndef p_statgroup_cont(p):\r\n '''statgroup : statgroup statement\r\n | statgroup command\r\n | statgroup expression'''\r\n p[0] = ('STATGROUP', p[1], p[2])\r\n\r\ndef p_expression_integer(p):\r\n 'expression : INTEGER'\r\n p[0] = ('INTEG', p[1])\r\n\r\ndef p_expression_boolean(p):\r\n 'expression : BOOLEAN'\r\n p[0] = ('BOOL', p[1])\r\n\r\ndef p_expression_id1(p):\r\n 'expression : COMMA INTEGER'\r\n p[0] = ('INT', p[2])\r\n\r\ndef p_expression_id2(p):\r\n 'expression : POINT INTEGER'\r\n p[0] = ('BOOLE', p[2])\r\n\r\ndef p_expression_massiv1(p):\r\n 'expression : COMMA INTEGER DOUB expression'\r\n p[0] = ('MASINT', p[2], p[4])\r\n\r\ndef p_expression_massiv2(p):\r\n 'expression : POINT INTEGER DOUB expression'\r\n p[0] = ('MASBOOL', p[2], p[4])\r\n\r\n\r\ndef p_expression_binop(p):\r\n '''expression : PLUS expression\r\n | MINUS expression'''\r\n p[0] = ('BINOP', p[1], p[2])\r\n\r\ndef p_expression_massiv3(p):\r\n 'expression : COMMA INTEGER DOUB expression DASH dims'\r\n p[0] = ('MASINT', p[2], p[4], p[6])\r\n\r\ndef p_expression_massiv4(p):\r\n 'expression : POINT INTEGER DOUB expression DASH dims'\r\n p[0] = ('MASBOOL', p[2], p[4], p[6])\r\n\r\ndef p_expression_id3(p):\r\n 'expression : DOLLAR INTEGER'\r\n p[0] = ('PROC', p[2])\r\n\r\ndef p_expression_masproc(p):\r\n 'expression : DOLLAR INTEGER DOUB expression'\r\n p[0] = ('MASPROC', p[2], p[4])\r\n\r\ndef p_expression_masproc1(p):\r\n 'expression : DOLLAR INTEGER DOUB expression DASH dims'\r\n p[0] = ('MASPROC', p[2], p[4], p[6])\r\n\r\ndef p_command_id(p):\r\n 'command : WAVE INTEGER'\r\n p[0] = ('LABEL', p[2])\r\n\r\ndef p_command_assign4(p):\r\n 'command : COMMA INTEGER DOUB expression DASH dims EQUALS expression'\r\n p[0] = ('ASSINT', p[2], p[4], p[6], p[8])\r\n\r\ndef p_command_assign5(p):\r\n 'command : POINT INTEGER DOUB expression DASH dims EQUALS expression'\r\n p[0] = ('ASSBOOL', p[2], p[4], p[6], p[8])\r\n\r\ndef p_relexpr(p):\r\n '''relexpr : expression EQ expression\r\n | expression MO expression\r\n | relexpr EQ expression\r\n | expression EQ NP'''\r\n p[0] = ('RELOP', p[1], p[2], p[3])\r\n\r\ndef p_command_print(p):\r\n '''command : PRINT COMMA INTEGER\r\n | PRINT POINT INTEGER '''\r\n p[0] = ('PRINT', p[3])\r\n\r\ndef p_command_printmatr(p):\r\n 'command : PRINTMAT COMMA INTEGER'\r\n p[0] = ('PRINTM', p[3])\r\n \r\ndef p_command_proc(p):\r\n 'command : DOLLAR INTEGER EQUALS BEGIN statgroup END'\r\n p[0] = ('ASSPROC', p[2], p[5])\r\n\r\ndef p_command_proc1(p):\r\n 'command : DOLLAR INTEGER DOUB expression EQUALS BEGIN statgroup END'\r\n p[0] = ('ASSPROC', p[2], p[4], p[7])\r\n\r\ndef p_command_proc2(p):\r\n 'command : DOLLAR INTEGER DOUB expression DASH dims EQUALS BEGIN statgroup END'\r\n p[0] = ('ASSPROC', p[2], 
p[4], p[6], p[9])\r\n\r\ndef p_command_proc3(p):\r\n 'command : DOLLAR INTEGER EQUALS expression'\r\n p[0] = ('EQPROC', p[2], p[4])\r\n\r\ndef p_command_proc4(p):\r\n 'command : DOLLAR INTEGER DOUB expression EQUALS expression'\r\n p[0] = ('EQPROC', p[2], p[4], p[6])\r\n\r\ndef p_command_proc5(p):\r\n 'command : DOLLAR INTEGER DOUB expression DASH dims EQUALS expression'\r\n p[0] = ('EQPROC', p[2], p[4], p[6], p[8])\r\n\r\ndef p_dims_1(p):\r\n 'dims : expression'\r\n p[0] = p[1]\r\n\r\ndef p_dims_2(p):\r\n '''dims : expression COMMA expression\r\n | dims COMMA expression'''\r\n p[0] = ('DIMS', p[1], p[3])\r\n\r\ndef p_dims1(p):\r\n '''dims : expression error expression\r\n | dims error expression'''\r\n p[0] = None\r\n #print('Error in ,')\r\n\r\ndef p_logic(p):\r\n '''logic : relexpr COMMA expression\r\n | expression COMMA expression\r\n | relexpr COMMA relexpr\r\n | expression COMMA relexpr\r\n | logic COMMA relexpr\r\n | logic COMMA expression'''\r\n p[0] = ('LOGIC', p[1], p[3])\r\n\r\ndef p_logic_error(p):\r\n '''logic : relexpr error expression\r\n | expression error expression\r\n | relexpr error relexpr\r\n | expression error relexpr\r\n | logic error relexpr\r\n | logic error expression'''\r\n p[0] = None\r\n #print('Error in ,')\r\n\r\ndef p_expression_pierce1(p):\r\n '''expression : PIERCE relexpr\r\n | PIERCE expression'''\r\n p[0] = ('PIERCE', p[2])\r\n\r\ndef p_expression_pierce2(p):\r\n 'expression : PIERCE BEGIN logic END'\r\n p[0] = ('PIERCES', p[3])\r\n\r\ndef p_expression_pierce2_error(p):\r\n 'expression : PIERCE error logic END'\r\n p[0] = None\r\n #print ('Error in {')\r\n\r\ndef p_command_if1(p):\r\n '''command : LPAREN expression RPAREN statement\r\n | LPAREN relexpr RPAREN statement'''\r\n p[0] = ('IF', p[2], p[4])\r\n\r\ndef p_command_if2(p):\r\n '''command : LPAREN expression RPAREN BEGIN statgroup END\r\n | LPAREN relexpr RPAREN BEGIN statgroup END\r\n | LPAREN logic RPAREN BEGIN statgroup END'''\r\n p[0] = ('IF', p[2], p[5])\r\n\r\ndef p_command_if2_error(p):\r\n '''command : LPAREN expression error BEGIN statgroup END\r\n | LPAREN relexpr error BEGIN statgroup END'''\r\n p[0] = None\r\n #print('Error in )')\r\n\r\ndef p_command_np(p):\r\n 'command : NP'\r\n p[0] = None\r\n\r\ndef p_command_assign(p):\r\n 'command : COMMA INTEGER EQUALS expression'\r\n p[0] = ('ASSINT', p[2], p[4])\r\n\r\ndef p_command_assign1(p):\r\n 'command : COMMA INTEGER DOUB expression EQUALS expression'\r\n p[0] = ('ASSINT', p[2], p[4], p[6])\r\n\r\ndef p_command_assign2(p):\r\n 'command : POINT INTEGER EQUALS expression'\r\n p[0] = ('ASSBOOL', p[2], p[4])\r\n\r\ndef p_command_assign3(p):\r\n 'command : POINT INTEGER DOUB expression EQUALS expression'\r\n p[0] = ('ASSBOOL', p[2], p[4], p[6])\r\n\r\ndef p_expression_ident1(p):\r\n '''expression : POINT INTEGER DOG expression\r\n | POINT INTEGER PERC expression\r\n | COMMA INTEGER DOG expression\r\n | COMMA INTEGER PERC expression\r\n | DOLLAR INTEGER DOG expression\r\n | DOLLAR INTEGER PERC expression'''\r\n p[0] = ('IDENT', p[1], p[2], p[3], p[4])\r\n\r\ndef p_expression_ident2(p):\r\n '''expression : POINT INTEGER DOUB expression DOG expression\r\n | POINT INTEGER DOUB expression PERC expression\r\n | COMMA INTEGER DOUB expression DOG expression\r\n | COMMA INTEGER DOUB expression PERC expression\r\n | DOLLAR INTEGER DOUB expression DOG expression\r\n | DOLLAR INTEGER DOUB expression PERC expression'''\r\n p[0] = ('IDENT', p[1], p[2], p[4], p[5], p[6])\r\n\r\ndef p_expression_ident3(p):\r\n '''expression : POINT INTEGER DOUB expression 
DASH dims DOG expression\r\n | POINT INTEGER DOUB expression DASH dims PERC expression\r\n | COMMA INTEGER DOUB expression DASH dims DOG expression\r\n | COMMA INTEGER DOUB expression DASH dims PERC expression\r\n | DOLLAR INTEGER DOUB expression DASH dims DOG expression\r\n | DOLLAR INTEGER DOUB expression DASH dims PERC expression'''\r\n p[0] = ('IDENT', p[1], p[2], p[4], p[6], p[7], p[8])\r\n\r\ndef p_command_label(p):\r\n '''command : LSQPAREN LSQPAREN expression RSQPAREN RSQPAREN LSQPAREN PLEASE RSQPAREN WAVE INTEGER\r\n | LSQPAREN LSQPAREN relexpr RSQPAREN RSQPAREN LSQPAREN PLEASE RSQPAREN WAVE INTEGER'''\r\n p[0] = ('GOLABEL', p[3], p[10])\r\n\r\ndef p_command_ladel_error_1(p):\r\n '''command : LSQPAREN error expression RSQPAREN RSQPAREN LSQPAREN PLEASE RSQPAREN WAVE INTEGER\r\n | LSQPAREN error relexpr RSQPAREN RSQPAREN LSQPAREN PLEASE RSQPAREN WAVE INTEGER\r\n | LSQPAREN LSQPAREN expression RSQPAREN RSQPAREN error PLEASE RSQPAREN WAVE INTEGER\r\n | LSQPAREN LSQPAREN relexpr RSQPAREN RSQPAREN error PLEASE RSQPAREN WAVE INTEGER'''\r\n p[0] = None\r\n #print('Error in [')\r\n\r\ndef p_command_ladel_error_2(p):\r\n '''command : LSQPAREN LSQPAREN expression RSQPAREN error LSQPAREN PLEASE RSQPAREN WAVE INTEGER\r\n | LSQPAREN LSQPAREN relexpr RSQPAREN error LSQPAREN PLEASE RSQPAREN WAVE INTEGER\r\n | LSQPAREN LSQPAREN expression RSQPAREN RSQPAREN LSQPAREN PLEASE error WAVE INTEGER\r\n | LSQPAREN LSQPAREN relexpr RSQPAREN RSQPAREN LSQPAREN PLEASE error WAVE INTEGER'''\r\n p[0] = None\r\n #print('Error in ]')\r\n\r\ndef p_expression_move(p):\r\n '''expression : MF\r\n | MB\r\n | MR\r\n | ML\r\n | TP'''\r\n p[0] = ('MOVE', p[1])\r\n\r\ndef p_statement(p):\r\n '''statement : command NEWLINE\r\n | command statement\r\n | expression NEWLINE\r\n | expression statement'''\r\n p[0] = p[1]\r\n\r\n\r\ndef p_error(t):\r\n # print(\"Syntax error at token\", t.type)\r\n print(\"Syntax error at '%s' at line '%s' at pos '%s'\" % (t.value, t.lexer.lineno, t.lexer.lexpos - t.lexer.lexposition))\r\n\r\nparser = yacc.yacc()\r\n\r\ndef parse(data, debug=0):\r\n parser.error = 0\r\n p = parser.parse(data, debug=debug)\r\n if parser.error:\r\n return None\r\n return p" }, { "alpha_fraction": 0.6824034452438354, "alphanum_fraction": 0.7038626670837402, "avg_line_length": 15.642857551574707, "blob_id": "967e3198c0f68a5d33b8a83fa50c4c56b726cdc2", "content_id": "586037c8470ea04860591efbd3abaa0a2c37e01d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 233, "license_type": "no_license", "max_line_length": 36, "num_lines": 14, "path": "/main.py", "repo_name": "AstroMaxx/Interpreter", "src_encoding": "UTF-8", "text": "import sys\nimport parser2\nimport interp2\n\ndata = open('D:/laba2/robot').read()\nprog = parser2.parse(data)\nif not prog:\n raise SystemExit\nb = interp2.Interp(prog)\ntry:\n b.run()\n raise SystemExit\nexcept RuntimeError:\n pass\n" } ]
4
dsmiff/datapalace
https://github.com/dsmiff/datapalace
d47cf35c5e721f66da7864ee7cb8e74171b1c21c
b27e2839c88c68da9ada3ae8dad732da16b4cc0a
340c316a002a6e4d65d30da4338810095f185f81
refs/heads/master
2021-03-24T12:34:37.069539
2017-10-05T09:12:33
2017-10-05T09:12:33
104,686,990
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.49806949496269226, "alphanum_fraction": 0.5019304752349854, "avg_line_length": 21.521739959716797, "blob_id": "0699b2413121535275a8e1ec39c59ab2469ace24", "content_id": "1d77d26ecf68c5a22be1df1be1f1abcc403bd57e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 518, "license_type": "no_license", "max_line_length": 74, "num_lines": 23, "path": "/datapalace/backup_master_database.py", "repo_name": "dsmiff/datapalace", "src_encoding": "UTF-8", "text": "'''\nDominic Smith <domlucasmith@gmail.com>\nSentiance Assignment 1.1\n'''\n\nimport os, sys\nimport re\nimport logging\n\nfrom datasets.Datasets import Datasets\nfrom core.Parser import args\n\nargs_dict = vars(args)\nlogging.basicConfig(level = logging.getLevelName(args.logging_level))\n##__________________________________________________________________||\ndef main():\n\n dataset = Datasets(**args_dict)\n dataset.backupWorkspace()\n\n##__________________________________________________________________|| \nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.6961583495140076, "alphanum_fraction": 0.7031431794166565, "avg_line_length": 16.200000762939453, "blob_id": "502b3852c8e8b9c6f5bfd7f1b35290c29582d88a", "content_id": "d4298d637e7bc64062bcd9716eb6c0fd8cc3fb8d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 859, "license_type": "no_license", "max_line_length": 113, "num_lines": 50, "path": "/README.md", "repo_name": "dsmiff/datapalace", "src_encoding": "UTF-8", "text": "# Datapalace\n\nMaster dataset creation\n\n## Getting Started\n\nsh package was installed with Anaconda.\nOtherwise it will use shutil automatically.\n\n### Installing\n\n```\ngit clone -o <target dir> git@github.com:dsmiff/datapalace.git --recursive\n```\n\n### Make a Master database\n\n```\ncd datapalace\npython create_master_database.py --in-dir <dir> --filesize <filesize> --structure <name1>,<size1>,<name2>,<size1>\n```\n\n### Update a Master database\n\n```\npython update_master_database.py --in-dir <dir> --structure <name1>,<size1>\n```\n\n### Backup a Master database\n\n```\npython backup_master_database.py --in-dir <dir> --out-dir <outdir>\n```\n\n\n# Foursquare - WIP\n\nScrape data off Foursquare and find a venue for a given latitude and longitude.\n\n### Requirements\n\nPandas\n```\npip install pandas\n```\nOr create a conda environment and set it up there.\n\n```\npython data_refresh.py\n```" }, { "alpha_fraction": 0.543379008769989, "alphanum_fraction": 0.577625572681427, "avg_line_length": 42.79999923706055, "blob_id": "e01db487aab42b61f908f08b81de44e72e9926d9", "content_id": "27c039f0e78f2b0893ee3cbf2bc3a2f1e3d7cb46", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 438, "license_type": "no_license", "max_line_length": 90, "num_lines": 10, "path": "/foursquare/core/Parser.py", "repo_name": "dsmiff/datapalace", "src_encoding": "UTF-8", "text": "# Dominic Smith <domlucasmith@gmail.com>\n\nimport argparse\n\n##__________________________________________________________________||\nparser = argparse.ArgumentParser()\nparser.add_argument('--lat', type = float, default = 40.7268, help = 'Input latitude')\nparser.add_argument('--long', type = float, default = -73.9972, help = 'Input longitude')\nparser.add_argument('--distance', type = int, default = 100, help = 'Measure of accuracy')\nargs = parser.parse_args()\n" }, { "alpha_fraction": 0.5817981958389282, 
"alphanum_fraction": 0.582348108291626, "avg_line_length": 37.284210205078125, "blob_id": "c33102e6a42fd7c8aaf597c86117258066b4269f", "content_id": "a18af0c19d0f79b91fe9b4aa2819596e94545c88", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3637, "license_type": "no_license", "max_line_length": 128, "num_lines": 95, "path": "/datapalace/datasets/Datasets.py", "repo_name": "dsmiff/datapalace", "src_encoding": "UTF-8", "text": "# Dominic Smith <domlucasmith@gmail.com>\n\nimport os, errno, sys\nimport logging\nfrom .Logic import Logic\n\n##__________________________________________________________________||\nclass Datasets(object):\n \"\"\"\n Args:\n in_dir : Input directory to create the Master directory.\n filesize: Size of the file under the subdirectory to the Master directory.\n structure: Structure under the Master directory. CSV input with subdirectory name and \n size of the subdirectory.\n force: Force remake of Master directory.\n dry_run: Perform a dry run.\n logging_level: Level of verbose from logging package.\n \"\"\"\n \n def __init__(self, in_dir, filesize, structure, out_dir, force, dry_run, logging_level):\n self.dir = in_dir\n self.file_size = filesize\n self.structure = structure\n self.out_dir = out_dir\n self.force = force\n self.dry_run = dry_run\n self.logging_level = logging.getLevelName(logging_level)\n self.logger = logging.getLogger('Master datasets')\n self.logic = Logic()\n\n def mkdir_p(self, dir):\n '''\n Make subdirectories for a given dir.\n ''' \n try:\n os.makedirs(dir)\n self.logger.info('created a directory, {}'.format(dir))\n except OSError as exc: # Python >2.5\n if exc.errno == errno.EEXIST and os.path.isdir(dir):\n self.logger.log(self.logging_level, 'tried to create a directory, {}. already existed'.format(dir))\n if self.force:\n self.logger.log(self.logging_level, 'force removal of directory, {}. 
remaking new directory'.format(dir)) \n import shutil\n shutil.rmtree(dir)\n os.makedirs(dir)\n pass\n else: raise\n \n def make_subdir(self):\n '''\n Make subdirectories and create and fill files.\n '''\n for subdir, dir_target_size in self.structure_dict.iteritems():\n self.full_path = os.path.join(self.dir, subdir)\n self.mkdir_p(self.full_path)\n self.logic.fill_subdir(self.full_path, self.file_size, dir_target_size)\n \n def update_subdir(self):\n '''\n Get the size of each subdirectory in self.structure_dict.\n Returns a dictionary where keys are the subdirectories and \n values are a tuple with (subdir_size, max_file_size)\n max_file_size is the largest file (in bytes) and represents \n the upper limit.\n '''\n for subdir, dir_target_size in self.structure_dict.iteritems():\n self.full_path = os.path.join(self.dir, subdir)\n size = self.logic.dir_size(self.full_path, False, True)\n self.logic.performUpdate(size)\n\n def backup_dir(self):\n self.top_tree = os.path.abspath(os.path.expanduser(self.dir))\n self.out_dir = os.path.abspath(os.path.expanduser(self.out_dir))\n self.logic.backupDir(self.top_tree, self.out_dir, self.dry_run)\n \n def beginWorkspace(self):\n '''\n Main method to execute the logic of making a database.\n '''\n self.structure_dict = self.logic.convert_structure(self.structure)\n self.mkdir_p(self.dir)\n self.make_subdir()\n\n def updateWorkspace(self):\n '''\n Main method to update the database.\n '''\n self.structure_dict = self.logic.convert_structure(self.structure)\n self.update_subdir()\n\n def backupWorkspace(self):\n '''\n Main method to backup an existing database.\n '''\n self.backup_dir()\n" }, { "alpha_fraction": 0.5693976283073425, "alphanum_fraction": 0.5717426538467407, "avg_line_length": 40.85889434814453, "blob_id": "65fce4f5d21af9fbe7fd5971364a53767f7fba1d", "content_id": "60a4c54d63e4959f77ca43b84fb4ef0b6559cfbc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6823, "license_type": "no_license", "max_line_length": 113, "num_lines": 163, "path": "/datapalace/datasets/Logic.py", "repo_name": "dsmiff/datapalace", "src_encoding": "UTF-8", "text": "# Dominic Smith <domlucasmith@gmail.com>\n\nimport os\nimport sys\nimport shutil\nimport itertools\nimport logging\nimport random\nfrom string import digits, ascii_uppercase\nfrom filecmp import cmp\n\n##__________________________________________________________________||\nclass Logic(object):\n def __init__(self):\n self.logger = logging.getLogger('Master datasets')\n self._results = { } \n\n def __repr__(self):\n return '{}({!r})'.format(self.__class__.__name__, self._results)\n\n def convert_to_bytes(self, target_size, reverse=False):\n '''\n Convert MB to bytes. If another unit is required, this code should change.\n '''\n conversion = 1024**2\n if reverse:\n # Reverse to convert bytes to MB\n bytes = target_size/conversion\n else:\n bytes = target_size*conversion\n self.logger.info('Input target file size {} is in MB, converting to bytes {}'.format(target_size, bytes))\n \n return bytes\n \n def convert_structure(self, structure):\n '''\n Convert third argument of script from CSVs to dictionary.\n Keys and Values are strings. 
Values need to be converted for later use.\n        '''\n        self.structure_list = structure.split(',')\n        try:\n            self.structure_dict = dict(itertools.izip_longest(*[iter(self.structure_list)] * 2,\n                                                              fillvalue=\"\"))\n        except ValueError:\n            print \"Could not convert list of file structure to dictionary : \", self.structure_list\n\n        return self.structure_dict\n\n    def fill_content_generator(self):\n        '''\n        Create a random alphanumeric string of a random size.\n        '''\n        chars = ascii_uppercase + digits\n        return ''.join(random.choice(chars) for _ in range(random.randint(0,10))) + '\\n'\n\n    def write_files(self, full_path, file_size, dir_target_size, fileId=0):\n        '''\n        file_size represents the upper limit on the size of each file.\n        Instead of using two while loops, another method is \n        to create X = dir_target_size/file_size number of files. \n        By using int(X) to round to the lowest integer, the total size of the files \n        should not exceed that of dir_target_size.\n\n        NB: Another option is to assign the size to each file using :\n        f = open('file1.txt', 'w')\n        f.seek(file_size-1)\n        f.write(self.fill_content_generator())\n        f.close()\n        But where's the fun in that?\n        '''        \n\n        dir_target_size = float(dir_target_size)\n        dir_size_bytes = self.convert_to_bytes(dir_target_size)\n        file_size = self.convert_to_bytes(file_size)\n        # This ensures the directory is of the size given in args\n        while self.dir_size(full_path) < dir_size_bytes:\n            fileId += 1\n            full_file_name = os.path.join(full_path, \"file{}.txt\".format(fileId))\n            with open(full_file_name,\"w\") as f:\n                # This ensures the file being written is of the size given in args\n                while os.path.getsize(full_file_name) < file_size:\n                    f.write(self.fill_content_generator())\n        f.close()\n        self.logger.info('Master directory made under: {}'.format(full_path))\n        \n    def fill_subdir(self, full_path, file_size, dir_target_size):\n        '''\n        Calculate the size of each subdirectory and fill it.\n        '''\n        self.logger.info('Target size of file: {} MB'.format(file_size))\n        self.logger.info('Target size of subdirectory {} : {} MB'.format(full_path, dir_target_size))\n        self.write_files(full_path, file_size, dir_target_size)        \n        self.logger.info('Finished creating files and writing to subdirectory: {}'.format(full_path))\n\n    def performUpdate(self, size_dict):\n        '''\n        size_dict is a dictionary with the key as the subdirectory and the value \n        as a tuple of (subdir_size, max_file_size)\n        self.structure_dict represents the name of the subdirectories and the increase \n        (in bytes)\n        '''\n        for subdir_path, sizes in size_dict.iteritems():\n            self.logger.info('Updating subdirectory: {}'.format(subdir_path))\n            max_filesize = sizes['max_filesize']\n            max_filesizeMB = self.convert_to_bytes(max_filesize,True)\n            total_size = sizes['total_size']\n            total_sizeMB = self.convert_to_bytes(total_size,True)\n            if subdir_path.split('/')[-1] in self.structure_dict:\n                total_sizeMB += int(self.structure_dict[subdir_path.split('/')[-1]])\n            fileId = len(self.list_files(subdir_path))\n            self.write_files(subdir_path, max_filesizeMB, total_sizeMB, fileId)\n\n    def backupDir(self, top_tree, backup_dir, dry_run):\n        '''\n        Method to copy recursively to a backup directory\n        '''\n        try:\n            from sh import rsync\n        except ImportError:\n            print \"Unable to import rsync package\"\n        \n        top_dir = os.path.basename(top_tree)\n        top_tree += os.sep\n        self.checkDirExists(top_tree)\n        if 'sh' in sys.modules:\n            # If rsync is available\n            rsync(\"-auhv\", \"--log-file={}\".format(self.logger), top_tree, backup_dir)\n        else:\n            try:\n                if dry_run:\n                    
self.logger.info('Will copy from {} to {}'.format(top_tree, backup_dir))\n sys.exit(1)\n shutil.copytree(top_tree, backup_dir)\n except shutil.Error as e:\n print('Directory not copied. Error: %s' % e)\n except OSError as e:\n print('Directory not copied. Error: %s' % e)\n \n def checkDirExists(self, dir):\n if not os.path.exists(dir):\n self.logger.info('Directory does not exist: {}'.format(dir))\n return False\n \n def dir_size(self, dir, dirOnly=True, withFiles=False):\n '''\n Method to return the size of a given directory.\n '''\n for root, dirs, files in os.walk(dir):\n self._results = { }\n # os.path.getsize won't work for symbolic links\n total_size = sum(os.path.getsize(os.path.join(root, name)) for name in files) \n if dirOnly: return total_size\n max_filesize = max(os.path.getsize(os.path.join(root, name)) for name in files)\n self._results[root] = { } \n self._results[root]['total_size'] = total_size\n self._results[root]['max_filesize'] = max_filesize\n return self._results\n \n def list_files(self, path):\n ''''\n List files under a directory.\n '''\n return [f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))]\n" }, { "alpha_fraction": 0.5421394109725952, "alphanum_fraction": 0.5559157133102417, "avg_line_length": 34.596153259277344, "blob_id": "9dea9b69ee6a4c3867fa47e614acfcdc913d0d95", "content_id": "574b318eed557c7fe79ad7011486ddd97ba52e3a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3702, "license_type": "no_license", "max_line_length": 162, "num_lines": 104, "path": "/foursquare/handler/UrlHandler.py", "repo_name": "dsmiff/datapalace", "src_encoding": "UTF-8", "text": "# Dominic Smith <domlucasmith@gmail.com>\n\nimport urllib2\nimport json\nimport pandas as pd\nfrom pandas.io.json import json_normalize\nimport os\nimport time\n\npd.set_option('display.max_columns', None)\npd.set_option('display.max_colwidth', 4096)\npd.set_option('display.max_rows', 65536)\npd.set_option('display.width', 1000) \n\n##__________________________________________________________________|| \nclass UrlHandler(object):\n def __init__(self, client_id, client_secret, category_id, coords, accuracy):\n self.client_id = client_id\n self.client_secret = client_secret\n self.category_id = category_id\n pass\n\n def make_request(self,url):\n '''\n Makes a new HTTP request to the given URL\n \n :param url: The URL to request\n :returns: JSON response that will be transformed to a pandas DataFrame\n '''\n\n try: \n req = urllib2.Request(url)\n response = urllib2.urlopen(req)\n data = json.loads(response.read())\n response.close()\n return data\n \n except Exception, e:\n print e\n\n def convertToDataFrame(self, data, requested_keys):\n data = pd.DataFrame(data[\"response\"]['venues'])[requested_keys]\n writeDFtoFile(data, None, './', 'Venues', True)\n return data\n \n def analyseDataFrame(self, data, sigma, requested_keys):\n self.venueIds = []\n self.frames = []\n for d in data['id']:\n # Pass each id into API\n url2 = \"https://api.foursquare.com/v2/venues/%s?client_id=%s&client_secret=%s&v=%s\" % (d, self.client_id, self.client_secret, time.strftime(\"%Y%m%d\"))\n sub_data = self.make_request(url2)\n id_data = sub_data['response'] \n nom_data = json_normalize(id_data['venue'])\n \n print 'List of possible venues: ', id_data['venue']['name']\n self.venueIds.append(d)\n\n if \"rating\" not in nom_data.columns:\n nom_data[\"rating\"] = 'NONE' \n \n self.frames.append(nom_data[requested_keys])\n\n def mapCoordsToVenue(self):\n 
'''\n        The location of a venue is given by its latitude and longitude.\n        Assume the distribution of the latitude and longitude is Gaussian and centered at their\n        nominal value. The width of each Gaussian is determined by the standard\n        deviation. \n        Depending on the value of the lat/long the standard deviation can be determined \n        in metres (assuming 1 degree equals 111 km).\n        Using the accuracy argument and the standard deviation, it should \n        be possible to filter out venues.\n        '''\n        pass\n        \n##__________________________________________________________________||\ndef writeDFtoFile(tbl, variable, dir, prefix=None, force=False):\n    '''\n    Write a produced DataFrame to a txt file \n    given a variable name and output directory\n    '''\n\n    if variable is None: variable = 'out'\n    if not os.path.exists(dir): os.makedirs(dir)\n\n    tblName = os.path.join(dir,'tbl_n{}_{}.txt'.format(prefix, variable))\n    if force and os.path.exists(tblName): os.remove(tblName)\n\n    with open(tblName,'a') as f:\n        tbl.to_string(f, index=True)\n        f.write('\\n')\n        f.close()\n    print('DataFrame {} written to file'.format(tblName))\n\n##__________________________________________________________________||    \ndef meter_to_coord(p, d, lat_m, long_m):\n    from math import cos  # 'cos' is used below but was never imported in this module\n    lat = p['lat']\n    long = p['long']\n\n    lat1 = lat + lat_m * (d / (11100.0/90*1000) * cos(lat))\n    long1 = long + long_m * (d / (11100.0/90*1000))\n\n    return {'lat': lat1, 'long': long1}\n" }, { "alpha_fraction": 0.5513016581535339, "alphanum_fraction": 0.5558958649635315, "avg_line_length": 31.649999618530273, "blob_id": "99c4318e3521cbbea8449fee01148221f4448167", "content_id": "6783dd2dcaf4c1eccb55d2c7d3927c3c29df34ea", "detected_licenses": [], "is_generated": 
false, "is_vendor": false, "language": "Python", "length_bytes": 882, "license_type": "no_license", "max_line_length": 244, "num_lines": 16, "path": "/datapalace/docs/__init__.py", "repo_name": "dsmiff/datapalace", "src_encoding": "UTF-8", "text": "'''\nDominic Smith <domlucasmith@gmail.com>\n\nData size increase:\n On testing create_master_database.py, as the size of the file read increased, the memory footprint of the script also increased. Stress testing pointed to potential problems where the size of file was comparable to the available RAM.\n Solution would be to target memory usage (e.g memory_profile) There exist several alternative \n methods to improve performance (https://wiki.python.org/moin/PythonSpeed/PerformanceTips)\n\n If a file becomes too large to load in memory, the file can be broken into mutiple smaller \n files. Then proces each file separately and aggregate results afterwards (useful for \n parallel processing / batch submission)\n\n Apply automate testing of the process, through a continuous integration system that runs the \n tests against every pull request will monitor the performance.\n\n'''\n" }, { "alpha_fraction": 0.4736842215061188, "alphanum_fraction": 0.4736842215061188, "avg_line_length": 19.176469802856445, "blob_id": "9ca3a59407c2b03e80f58c4924c92c9888c8fe2c", "content_id": "5959ebe6d562c5cfdc5fac3f57ad2ac3dc794a78", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 342, "license_type": "no_license", "max_line_length": 51, "num_lines": 17, "path": "/tests/unit/test_makeBackup.py", "repo_name": "dsmiff/datapalace", "src_encoding": "UTF-8", "text": "import os\nimport sys\nimport unittest\n\n##_______________________________________________||\nclass TestMakeBackup(unittest.TestCase):\n\t\n\tdef __init__(self):\n\t\tpass\n\n\tdef create_testDir(path):\n\t\tif not os.path.exists(path):\n\t\t\tos.path.makedirs(path)\n\n##_______________________________________________||\nif __name__ == '__main__':\n\tunittest.main()" } ]
9
Shawnjoseph2001/CECS229PA3
https://github.com/Shawnjoseph2001/CECS229PA3
3f72d8403959262dd2912b837f834e8bb1b184c2
40acb66324d1e9c96fb7d3175f01e5cbb540a2b2
8869311707c23ed4210e28def13d0c3e86b2b18b
refs/heads/main
2023-01-06T00:39:18.907981
2020-11-10T04:27:06
2020-11-10T04:27:06
307,920,606
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4904552102088928, "alphanum_fraction": 0.5007342100143433, "avg_line_length": 14.837209701538086, "blob_id": "fac764db6d82ed159c952ef59d4ad7407616a10b", "content_id": "b8c1d3724c8f87f95be80fb21493d65af5e43260", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 681, "license_type": "no_license", "max_line_length": 34, "num_lines": 43, "path": "/pythonProject/main.py", "repo_name": "Shawnjoseph2001/CECS229PA3", "src_encoding": "UTF-8", "text": "def myFilter(L, num):\n for i in L:\n if not (i % num == 0):\n L.remove(i)\n return L\n\n\ndef my_lists(L):\n ret = []\n for i in L:\n ret2 = []\n for j in range(i):\n ret2.append(j)\n ret.append(ret2)\n return ret\n\n\ndef my_function_composition(f, g):\n ret = {}\n for i in f:\n ret.append(i, g.get(i))\n return ret\n\n\ndef mySum(L):\n current = 0\n for x in L:\n current = current + x\n return current\n\n\ndef myProduct(L):\n current = 1\n for x in L:\n current = current * x\n return current\n\n\ndef myMin(L):\n current = L[0]\n for x in L:\n current = min(current, x)\n return current\n" }, { "alpha_fraction": 0.5411441922187805, "alphanum_fraction": 0.5799372792243958, "avg_line_length": 19.918033599853516, "blob_id": "ced4ddb52a95563115ec9336c997a31f5003ddd6", "content_id": "b157e308b4513d96b705979590c0ffe1db7bad4d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2552, "license_type": "no_license", "max_line_length": 79, "num_lines": 122, "path": "/pythonProject/The_Field_problems.py", "repo_name": "Shawnjoseph2001/CECS229PA3", "src_encoding": "UTF-8", "text": "# version code 80e56511a793+\n# Please fill out this stencil and submit using the provided submission script.\n\n\n## 1: (Problem 1.7.1) Python Comprehensions: Filtering\ndef myFilter(L, num):\n ret = []\n for i in L:\n if not i % num == 0:\n ret.append(i)\n return ret\n\n\n## 2: (Problem 1.7.2) Python Comprehensions: Lists of Lists\n\ndef my_lists(L):\n ret = []\n for i in L:\n ret2 = []\n for j in range(1, i + 1):\n ret2.append(j)\n ret.append(ret2)\n return ret\n\n\n## 3: (Problem 1.7.3) Python Comprehensions: Function Composition\ndef myFunctionComposition(f, g):\n ret = {}\n for i in f:\n ret[i] = g.get(f[i])\n return ret\n\n\n## 4: (Problem 1.7.4) Summing numbers in a list\ndef mySum(L):\n current = 0\n for x in L:\n current = current + x\n return current\n\n\n## 5: (Problem 1.7.5) Multiplying numbers in a list\ndef myProduct(L):\n current = 1\n for x in L:\n current = current * x\n return current\n\n\n## 6: (Problem 1.7.6) Minimum of a list\ndef myMin(L):\n current = L[0]\n for x in L:\n current = min(current, x)\n return current\n\n\n## 7: (Problem 1.7.7) Concatenation of a List\ndef myConcat(L):\n '''\n Input:\n -L:a list of strings\n Output:\n -the concatenation of all the strings in L\nBe sure your procedure works for the empty list.\n Examples:\n >>> myConcat(['hello','world'])\n 'helloworld'\n >>> myConcat(['what','is','up'])\n 'whatisup'\n '''\n pass\n\n\n## 8: (Problem 1.7.8) Union of Sets in a List\ndef myUnion(L):\n '''\n Input:\n -L:a list of sets\n Output:\n -the union of all sets in L\nBe sure your procedure works for the empty list.\n Examples:\n >>> myUnion([{1,2},{2,3}])\n {1, 2, 3}\n >>> myUnion([set(),{3,5},{3,5}])\n {3, 5}\n '''\n pass\n\n\n## 9: (Problem 1.7.10) Complex Addition Practice\n# Each answer should be a Python expression whose value is a complex number.\n\ncomplex_addition_a = ...\ncomplex_addition_b = 
...\ncomplex_addition_c = ...\ncomplex_addition_d = ...\n\n\n## 10: (Problem 1.7.12) Combining Complex Operations\n# Write a procedure that evaluates ax+b for all elements in L\n\ndef transform(a, b, L):\n '''\n Input:\n -a: a number\n -b: a number\n -L: a list of numbers\n Output:\n -a list of elements where each element is ax+b where x is an element in L\n Examples:\n >>> transform(3,2,[1,2,3])\n [5, 8, 11]\n '''\n pass\n\n\n## 11: (Problem 1.7.13) GF(2) Arithmetic\nGF2_sum_1 = ... # answer with 0 or 1\nGF2_sum_2 = ...\nGF2_sum_3 = ...\n" } ]
2
dawnsea/mkd2blog
https://github.com/dawnsea/mkd2blog
961fdd1ff33b76b3eeddb2bf09f737499ffa3cde
90670969050bf88cdd7084867e1a573a612306b5
13705f5aae237f854179730d81d07e251bfa33b8
refs/heads/master
2021-01-01T05:18:44.182954
2016-04-12T02:47:25
2016-04-12T02:47:25
56,023,598
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.554731011390686, "alphanum_fraction": 0.5774582624435425, "avg_line_length": 21.93617057800293, "blob_id": "684310377e92506431184cacc3665e8fc5fee300", "content_id": "b7d73e8e3fa79a3bad9098fdf7efe472b0e99714", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3814, "license_type": "no_license", "max_line_length": 91, "num_lines": 94, "path": "/README.md", "repo_name": "dawnsea/mkd2blog", "src_encoding": "UTF-8", "text": "# mkd2blog\n마크다운을 블로그로\n\n\n# 뭐하는 놈인가?\n깃헙에 마크다운으로 블로그 글을 쓴 다음, 이 툴을 사용하면 완전한 정적 페이지로 전환해줌.\nftp 업로드까지 자동으로 처리해주긴 함..\n\n\n\n# 서문\n만들기 귀찮아서 데지는 줄 알았... 꼭 필요해서 만들었음.\n파이썬 잘 못 함.. \n\n\n# 사용법\npython ./tblog_static.py clone | pull\n\n\n# 설정\n소스 앞부분을 다음과 같이 수정\n\n```python \nmy_config = { 'REPODIR' : '29만원', # 레파짓의 로컬 디렉토리 이름\n 'GITHUB' : 'https://github.com/dawnsea/니가가라하와이.git', # 블로그 마크다운 문서들 보관장소 \n 'DOCROOT' : '/blog/', # 웹서버 홈\n 'NAME' : '투표', # rss 표시 이름\n 'DESC' : '투표해', # rss 표시 설명\n 'URL' : 'http://투표하세요', # rss 에서 사용할 블로그 URL\n 'FTP' : 'troot.co.kr', # ftp 업로드 주소\n 'FTPUSER' : '씨바스리갈', # ftp 계정\n 'FTPPATH' : '우주', # 웹서버의 경로\n 'FTPREMOTE' : '/home/hosting_users/살려야한다' } # 웹서버의 절대 경로\n```\n\n# 글쓰기\n레파짓에 다음 경로를 생성함\n\n- images : 블로그 안에 이미지를 삽입할 경우 여기에 넣어둠, 이미지는 마크다운 태그로 삽입하되 /로 시작함.\n- 년-월 폴더 : 이 안에 글을 씀\n- 년-월 폴더/url : 파일명이 url이 됨.\n- 예) 2016-04/첫글이당 : 첫글이당이라는 마크다운 파일이 내가 쓴 글임\n- 규칙 : 최초 3줄을 다음과 같이 씀 (형식 지켜야됨)\n- - 제목 : 나는 제목이당\n- - 태그 : 음악, 잡담, 영화\n- - 날짜 : 2016/04/04 \n- 제목에 [비공개] 를 포함하면 나만 보는 글이 됨\n\n\n# 꾸미기 \n- template/css/style.css, style2.css 수정\n- template/*.html 수정\n\n\n# 퍼블리시\n- python tblog_static.py 실행하면 정적 페이지가 deploy에 생성됨. \n- 이어서 ftp 업로드가 수행됨. (귀찮으면 걍 빼삼..)\n- 귀찮으면 서버에 python 모듈 깔고 크론탭에 걸기 바람.. 더 이상 지원 몬함 귀찮음.\n- 매번 암호 넣기 귀찮으면 코드에 넣으삼.. 털리면 니 책임.\n\n# 관리자 (나만 보는 페이지)\n- 툴을 실행하면 콘솔에 어드민용 주소가 표시됨. 그 페이지로 들어가면 비공개 글가지 다 보임.\n- 예) http://www.troot.co.kr/blog/admin240bf9e4-42d5-413a-93be-1085c88bbddb\n- 이 주소는 퍼블리시 할 때마다 바뀜. 귀찮으면 고정으로 코드 수정 gogo \n\n# 부족한 기능들\n- 고쳐 써라\n\n# 있는 기능\n- 방명록 : 그냥 제 디스쿠스가 붙어있습니다. 본인 걸로 바꿔쓰세요\n- 소개 페이지 : template의 html을 바꿔쓰세요\n- 논페이징 : 페이징이 음슴. 1인이 생성할 수 있는 컨텐츠의 양은 정해져있음.\n- 논검색 : 검색이 없음. 1페이지이므로 브라우저 검색하면 됨... (응?)\n\n# 주의사항\n- ftp 업로드시 비교를 파일크기로 합니다. 수정해도 파일크기가 같으면 overwrite 안 해요.\n- 사실 더 만들기가 귀찮았습니다.\n- 갑자기 왜 존댓말이지? \n\n# 라이선스\n- 그땅거 음따. \n\n# 후기\n- 플라스크 학습용으로 github 연동을 블로그 툴을 만들었는데,\n- 만들다 보니 CRUD를 다 구현할 필요가 있을까 github에서 글 쓰기가 좋은데,\n- 그러다 보니 머더러 CRUD를 다 구현할 껑가 이래서야 정적 페이지가 아니자너\n- 에라 모르겠다 걍 올 html로 찍어버리자 이게 영속성이 이따.\n- ... 하고 만들었습니다.\n\n단 두 명만!! 제발!! 
두 명만이라도 써주신다면..\n\n\n# 예제\n- http://64.23.78.147/blog/\n" }, { "alpha_fraction": 0.48722171783447266, "alphanum_fraction": 0.49051472544670105, "avg_line_length": 30.747726440429688, "blob_id": "d7a4745de4283f08b2f6650d61e9d45f39ffdc2b", "content_id": "81447d0d09c0a02115aaf0e32d67a13166076f6e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14089, "license_type": "no_license", "max_line_length": 191, "num_lines": 440, "path": "/tblog_static.py", "repo_name": "dawnsea/mkd2blog", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\n :copyright: (c) 2016 by dawnsea\n :license: BSD, see LICENSE for more details.\n\"\"\"\nimport os\nimport sys\nimport base64\nimport hashlib\nimport git\nimport datetime\nimport time as mytime\nimport urllib\nimport shutil\nimport uuid\nimport getpass\n\nfrom collections import defaultdict\nimport operator\n\nfrom pygments import highlight\nfrom pygments.formatters import HtmlFormatter\nfrom pygments.lexers import get_lexer_by_name\nimport misaka as mk\n\nfrom operator import itemgetter\n\nimport paramiko\n\nimport pysftp\nimport sys\n\n\n\nmy_config = { 'REPODIR' : '29만원',\n 'GITHUB' : 'https://github.com/니가가라하와이', \n 'DOCROOT' : '/blog/',\n 'NAME' : '[t:/]',\n 'DESC' : '[t:/] is not technology-root',\n 'URL' : 'http://www.troot.co.kr',\n 'FTP' : 'troot.co.kr',\n 'FTPUSER' : 'keeptalk',\n 'FTPPATH' : 'www',\n 'FTPREMOTE' : '/home/hosting_users/리승만' }\n\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\nclass HighlighterRenderer(mk.HtmlRenderer):\n def blockcode(self, text, lang):\n if not lang:\n return '\\n<blockquote>{}</blockquote>\\n'.format(text.strip()).replace(' \\n', '<br>\\n').replace('\\n\\n', '<br>')\n# return '\\n<pre><code>{}</code></pre>\\n'.format(text.strip())\n# return '\\n{}\\n'.format(text.strip())\n\n lexer = get_lexer_by_name(lang, stripall=True)\n formatter = HtmlFormatter()\n\n return highlight(text, lexer, formatter)\n\nrenderer = HighlighterRenderer()\nmd = mk.Markdown(renderer, extensions=mk.EXT_AUTOLINK | mk.EXT_FENCED_CODE | mk.EXT_NO_INTRA_EMPHASIS | mk.EXT_QUOTE)\n\n\ndef get_posts(tagstr = '', admin = False):\n ymlist = os.listdir(my_config['REPODIR'])\n ymlist = sorted(ymlist, reverse = True)\n \n posts = []\n for ym in ymlist:\n if os.path.isdir(my_config['REPODIR'] + '/' + ym) and ym != '.git' and ym != 'images':\n titles = os.listdir(my_config['REPODIR'] + '/' + ym)\n \n for title in titles:\n post = {}\n try:\n f = open(my_config['REPODIR'] + '/' + ym + '/' + title, 'r')\n subject = f.readline()\n tag = f.readline()\n date = f.readline()\n \n text = f.read()\n\n f.close()\n# print subject\n \n except:\n print 'file error'\n continue\n \n if tagstr != '':\n if '태그 :' not in tag or ' ' + urllib.unquote(tagstr).encode('utf-8') not in tag:\n continue\n \n subject = subject.replace('제목 :', '').strip()\n date = date.replace('날짜 :', '').strip()\n\n if not admin:\n if '[비밀글]' in subject or '[비공개]' in subject:\n continue\n \n try:\n write_time = mytime.strptime(date, '%Y/%m/%d')\n except:\n print '타임 에러 %s' % subject\n continue\n \n post['year'] = mytime.strftime('%Y', write_time)\n post['month'] = mytime.strftime('%m', write_time)\n post['day'] = mytime.strftime('%d', write_time)\n post['date'] = date \n post['title'] = urllib.unquote(subject)\n post['filename'] = title\n post['timestamp'] = mytime.mktime(write_time)\n post['timestr'] = mytime.asctime(mytime.localtime(post['timestamp'])) + ' +0900'\n post['tags'] = tag.replace('태그 : ', '').strip().split(',')\n 
post['tag'] = tag.replace('태그 : ', '').strip()\n post['text'] = md(text.decode('utf-8'))\n post['text'] = post['text'].replace('</p>\\n\\n', '</p><br>\\n').replace('img src=\"/', 'img src=\"%s/images/' % my_config['DOCROOT'])\n post['stext'] = escape(mk.html(text.decode('utf-8')[:100] + '... (RSS 생략)'))\n posts.append(post)\n \n else:\n print '제목이 없음'\n posts = sorted(posts, key=itemgetter('timestamp'), reverse = True)\n\n return posts\n\ndef escape(t):\n return (t\n .replace(\"&\", \"&amp;\").replace(\"<\", \"&lt;\").replace(\">\", \"&gt;\")\n .replace(\"'\", \"&#39;\").replace('\"', \"&quot;\")\n )\n\ndef git_clone():\n \n git.Git().clone(my_config['GITHUB'])\n \ndef git_pull():\n git.cmd.Git(my_config['REPODIR']).pull()\n \n\ndef gen_deploy():\n try:\n shutil.rmtree('deploy')\n except:\n pass\n \n try:\n os.makedirs('deploy/tag')\n except:\n pass\n\n shutil.copytree('templates/css', 'deploy/css')\n shutil.copytree('%s/images' % my_config['REPODIR'], 'deploy/images')\n \ndef get_layout(title = '', body = ''):\n f = open('templates/header.html', 'r')\n ret = f.read()\n f.close()\n \n if title != '':\n ret = ret.replace('##title##', title)\n \n if body != '':\n ret = ret.replace('##body##', body)\n \n return ret.strip().replace('##docroot##', my_config['DOCROOT']);\n \ndef get_front(posts):\n f = open('templates/front.html', 'r')\n ret = f.read().strip()\n f.close()\n \n front_list = ''\n \n for post in posts:\n if '[비공개]' in post['title'] or '[비밀글]' in post['title']:\n class_name = 'class=\"admin\"'\n else:\n class_name = 'class=\"uline\"'\n front_list = front_list + '<li><a %s href=\"%s%s/%s/%s.html\">%s</a><span class=date>%s</span></li>\\n' % \\\n (class_name, my_config['DOCROOT'], post['year'], post['month'], urllib.quote(post['filename']), post['title'].decode('utf8'), post['date'])\n \n# {{ url_for('view', year=post.year, month=post.month, filename=post.filename.decode('utf-8'))}}\">{{ post.title.decode('utf8') }}</a><span class=date>{{ post.date }}</span></li>\n\n return ret.replace('##list##', front_list).replace('##count##', str(len(posts))).strip()\n \ndef deploy_file(filename, text):\n f = open(filename, 'w')\n f.write(text)\n f.flush()\n f.close()\n \n\ndef gen_guest():\n f = open('templates/guest.html', 'r')\n guest = f.read()\n f.close()\n \n body = get_layout(title = '방명록', body = guest)\n deploy_file('deploy/guest.html', body)\n \ndef gen_rss(posts):\n \n f = open('templates/rss.xml', 'r')\n rss = f.read()\n f.close()\n \n rss = rss.replace('##name##', my_config['NAME']).replace('##url##', my_config['URL']).replace('##desc##', my_config['DESC'])\n rss = rss.replace('##pubdate##', datetime.datetime.now().strftime(\"%a, %d %b %Y %H:%M:%S +0900\"))\n \n index = 0\n \n itemstr = ''\n for post in posts:\n itemstr = itemstr + \\\n ' <item>\\n' \\\n ' <title>%s</title>\\n' \\\n ' <link>%s%s/%s/%s/%s</link>\\n' \\\n ' <description>%s</description>\\n' \\\n ' <pubDate>%s</pubDate>\\n' \\\n ' </item>\\n' % (post['title'].decode('utf-8'), my_config['URL'], my_config['DOCROOT'], post['year'], post['month'], post['filename'], post['stext'], post['timestr'])\n \n if index > 10:\n break\n index = index + 1\n \n rss = rss.replace('##itemstr##', itemstr)\n deploy_file('deploy/rss.xml', rss)\n \n\ndef gen_tagall(posts):\n \n gtags = defaultdict(lambda: 1)\n\n for post in posts:\n for tag in post['tags']:\n\n tag = tag.strip()\n\n if tag is not '':\n gtags[tag] = gtags[tag] + 1\n\n gtags = sorted(gtags.items(), key=operator.itemgetter(0), reverse = False)\n \n f = 
open('templates/tagall.html', 'r')\n    tagall = f.read()\n    f.close()\n    \n    tagstr = ''\n    for tag in gtags:\n        tagstr = tagstr + '<a class=\"uline\" href=\"%stag/%s.html\">%s</a><span class=date>/%s</span><br>\\n' % (my_config['DOCROOT'], tag[0], tag[0], tag[1] - 1)\n    \n    tagall = tagall.replace('##tagall##', tagstr)    \n    \n    body = get_layout(title = '태그에 의한 분류', body = tagall)\n    deploy_file('deploy/tagall.html', body)\n\n\ndef gen_tags(posts):\n    \n    body = get_layout()\n    \n    tag_list = set('')\n    for tag in posts:\n        \n        for tag_single in tag['tags']:\n            tag_list.add(tag_single.strip())\n    \n    for tag_single in tag_list:\n        \n        if tag_single.strip() == '':\n            continue\n        \n        post_result = []        \n        for post in posts:\n            \n            if tag_single in post['tag']:\n                post_result.append(post)\n        \n        view_result = body.replace('##title##', tag_single).replace('##body##', get_front(post_result)).replace('##docroot##', my_config['DOCROOT'])\n        deploy_file('deploy/tag/%s.html' % (tag_single), view_result)\n\n        \n\n\n\n\n\n# break;\n# print(str(tag_list))\n\n    \n    \ndef gen_sub(posts):\n    \n    f = open('templates/content.html', 'r')\n    ret = f.read().strip();\n    f.close()\n    \n    body = get_layout()\n    \n    for post in posts:\n        try:\n            os.makedirs('deploy/%s/%s' % (post['year'], post['month']))\n            \n        except:\n            pass\n        \n        result = ''\n        \n        result = ret.replace('##title##', post['title'])\n        result = result.replace('##content##', post['text'])        \n        result = result.replace('##date##', post['date'])\n        result = body.replace('##title##', post['title']).replace('##body##', result).strip()\n        \n        itemstr = ''\n        for item in post['tags']:\n            itemstr = itemstr + '<a href=\"%stag/%s.html\">%s/</a> ' % ( my_config['DOCROOT'], item.strip(), item.strip())\n        \n        result = result.replace('##tag##', itemstr)\n        \n        deploy_file('deploy/%s/%s/%s.html' % (post['year'], post['month'], post['filename']), result)\n    \ndef gen_static():\n    print '시작'\n    \n    gen_deploy()\n    posts = get_posts(admin = True)\n    \n    admin_url = 'admin%s' % uuid.uuid4()\n    \n    front_list = get_front(posts)\n    deploy_file('deploy/%s' % admin_url, get_layout(title = 'admin', body = front_list))    \n    \n    print 'admin url = %s%s%s' % (my_config['URL'], my_config['DOCROOT'], admin_url)\n    print 'admin url = http://localhost%s%s' % (my_config['DOCROOT'], admin_url)\n    \n    gen_sub(posts)\n    gen_guest()\n    gen_rss(posts)\n    \n    posts = get_posts()    \n    front_list = get_front(posts)\n    deploy_file('deploy/index.html', get_layout(title = '_', body = front_list))    \n    gen_tagall(posts)\n    gen_tags(posts)    \n    \n\ndef r_put(sftp, cur = ''):\n    remote_path = my_config['FTPREMOTE']\n\n\n    replace_path = '%s/deploy' % os.getcwd()    \n\n    if cur == '':\n        local_path = '%s/deploy' % os.getcwd()    \n    else:\n\t\tlocal_path = cur\n\n    filenames = os.listdir(local_path)\n    for filename in filenames:\n        full_filename = os.path.join(local_path, filename)\n        if os.path.isdir(full_filename):\n            try:\n                sftp.mkdir(full_filename.replace('%s/' % replace_path, ''))\n            except:\n                pass\n            r_put(sftp, full_filename)\n        else:\n            try:\n                remote_size = sftp.stat(full_filename.replace('%s/' % replace_path, '')).st_size\n            except:\n                remote_size = 0\n                pass\n#            print os.path.getsize(os.path.join(local_path, full_filename))\n            if os.path.getsize(os.path.join(local_path, full_filename)) != remote_size:\n\n                sftp.put(os.path.join(local_path, full_filename), full_filename.replace('%s/' % replace_path, ''))\n                print ('%s, %s' % (os.path.join(local_path, full_filename), full_filename.replace('%s/' % replace_path, '')))\n''' \t\n    for root, dirs, files in os.walk(local_path):\n        for fname in files:\n            
full_fname = os.path.join(root, fname)\n            #print full_fname            \n            print '%s/%s' % (root.replace(local_path, ''), fname)\n            #os.path.join(root, remote_path, fname)\n            \n            sftp.put(full_fname, os.path.join(root.replace(local_path, ''), fname))\n''' \t    \n    \nif __name__ == '__main__':    \n\n    print '사용법 : tblog_static.py clone | pull\\n'\n    \n    if len(sys.argv) > 1:\n        if sys.argv[1] == 'clone':\n            git_clone()\n        if sys.argv[1] == 'pull':\n            git_pull()    \n    else:\n        gen_static()\n    \n    print '%s/deploy' % os.getcwd()\n    \n    pw = getpass.getpass(\"ftp %s password : \" % my_config['FTP'])\n    \n    ssh = paramiko.SSHClient()\n\n    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n    ssh.connect(my_config['FTP'], username = my_config['FTPUSER'], password = pw)\n\n    sftp = ssh.open_sftp()\n    sftp.chdir(my_config['FTPPATH'])\n    \n    \n    for filename in sftp.listdir('.'):\n        if 'admin' in filename:\n            print '%s/%s' % (my_config['FTPPATH'], filename)\n            sftp.remove(filename)\n    \n    r_put(sftp)    \n\n    sftp.close()\n    ssh.close()\n\n    \n'''\n    with pysftp.Connection(my_config['FTP'], username = my_config['FTPUSER'], password = pw) as sftp:\n        for filename in sftp.listdir(my_config['FTPPATH']):\n            if 'admin' in filename:\n                print '%s/%s' % (my_config['FTPPATH'], filename)\n                sftp.remove('%s/%s' % (my_config['FTPPATH'], filename))\n        try:\n            sftp.put_r('%s/deploy' % os.getcwd(), my_config['FTPPATH'], confirm = True, preserve_mtime = True)\n\n        except:\n            pass\n'''\n" } ]
2
lvidarte/django-nerdlabs
https://github.com/lvidarte/django-nerdlabs
b5b6a93431b424e3d27717a9f0910d2843016819
0fe631e6e81929a805f4b3de459bab00a3e26c58
fef7ce013711c79c68591229a9fc1bbccb5474f2
refs/heads/master
2021-06-08T19:35:11.602996
2016-10-13T14:06:37
2016-10-13T14:06:37
1,291,505
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6463730335235596, "alphanum_fraction": 0.6476684212684631, "avg_line_length": 34.09090805053711, "blob_id": "237a70956fe3719a6debd9fdc01d333aef4c628a", "content_id": "7f7a48d685b2275a04f3e78cfb6f62a1f02019c2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 772, "license_type": "no_license", "max_line_length": 89, "num_lines": 22, "path": "/nerdlabs/blog/context_processors.py", "repo_name": "lvidarte/django-nerdlabs", "src_encoding": "UTF-8", "text": "\"\"\"\nAuthor: Leo Vidarte <http://nerdlabs.com.ar>\n\nThis is free software,\nyou can redistribute it and/or modify it\nunder the terms of the GPL version 3\nas published by the Free Software Foundation.\n\n\"\"\"\n\nfrom django.conf import settings\n\ndef blog(request):\n return {\n 'BLOG_NAME': getattr(settings, 'BLOG_NAME', u''),\n 'BLOG_DESCRIPTION': getattr(settings, 'BLOG_DESCRIPTION', u''),\n 'BLOG_KEYWORDS': getattr(settings, 'BLOG_KEYWORDS', u''),\n 'BLOG_FEEDS_URL': getattr(settings, 'BLOG_FEEDS_URL', u''),\n 'DATE_FORMAT': getattr(settings, 'DATE_FORMAT', 'F j, Y'),\n 'DATETIME_FORMAT': getattr(settings, 'DATETIME_FORMAT', 'F j, Y H:i:s'),\n 'GOOGLE_ANALYTICS_TEMPLATE': getattr(settings, 'GOOGLE_ANALYTICS_TEMPLATE', u''),\n }\n" }, { "alpha_fraction": 0.7400000095367432, "alphanum_fraction": 0.7599999904632568, "avg_line_length": 49, "blob_id": "f14b7202b90b537f98d2c4807b52579962f9f8cf", "content_id": "eda861e86441e56c9a25bd70fc34073d700f389e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 100, "license_type": "no_license", "max_line_length": 87, "num_lines": 2, "path": "/bin/deploy.sh", "repo_name": "lvidarte/django-nerdlabs", "src_encoding": "UTF-8", "text": "#!/bin/bash\nln -s ../env/lib/python2.7/site-packages/django/contrib/admin/static/admin static/admin\n" }, { "alpha_fraction": 0.7126168012619019, "alphanum_fraction": 0.7149532437324524, "avg_line_length": 22.77777862548828, "blob_id": "ddfccb92f34803b39ad250625c767498d57ba4d6", "content_id": "5a108e14e7b4c8d5553f0a156d15464321953542", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 428, "license_type": "no_license", "max_line_length": 64, "num_lines": 18, "path": "/manage.py", "repo_name": "lvidarte/django-nerdlabs", "src_encoding": "UTF-8", "text": "#!env/bin/python\n\n\"\"\"\nAuthor: Leo Vidarte <http://nerdlabs.com.ar>\n\nThis is free software,\nyou can redistribute it and/or modify it\nunder the terms of the GPL version 3\nas published by the Free Software Foundation.\n\n\"\"\"\n\nimport os, sys\n\nif __name__ == \"__main__\":\n os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"settings\")\n from django.core.management import execute_from_command_line\n execute_from_command_line(sys.argv)\n" }, { "alpha_fraction": 0.552571177482605, "alphanum_fraction": 0.5654270052909851, "avg_line_length": 27.090322494506836, "blob_id": "e2d12e454ff456592b84229e00175f5b39d097aa", "content_id": "c9a5a52f48b10746389d1d402f20c5b100968764", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "Python", "length_bytes": 4356, "license_type": "no_license", "max_line_length": 75, "num_lines": 155, "path": "/nerdlabs/cache/views.py", "repo_name": "lvidarte/django-nerdlabs", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n\"\"\"\nAuthor: Leo Vidarte <http://nerdlabs.com.ar>\n\nThis is free software,\nyou can redistribute it and/or modify it\nunder the 
terms of the GPL version 3\nas published by the Free Software Foundation.\n\n\"\"\"\n\nimport os.path\nfrom PIL import Image, ImageDraw\nimport math\nimport datetime, re\n\nfrom django.conf import settings\nfrom django.http import HttpResponse\nfrom django.http import Http404\nfrom django.core.cache import cache\nfrom django.shortcuts import render_to_response\n\nfrom nerdlabs.common.tools import clevercss\n\n\ndef parse_dcss_file(request, filename):\n    # http://github.com/timparkin/clevercss\n    # http://lucumr.pocoo.org/2007/9/17/using-clevercss-in-django\n    fn = os.path.join(settings.STATIC_ROOT, 'dcss', '%s.dcss' % filename)\n    if not os.path.exists(fn):\n        raise Http404()\n    f = file(fn)\n    try:\n        css = \"/* %s */\\n\\n\" % datetime.datetime.now()\n        css += clevercss.convert(f.read().decode('utf-8'))\n\n        if not settings.DEBUG:\n            l = []\n            for line in css.split('\\n'):\n                if not line:\n                    l.append('\\n')\n                l.append(line.strip())\n\n            compress = ''.join(l)\n            compress = compress.replace(': ', ':')\n\n            # comment out the following 3 lines for\n            # greater compression and lower readability\n            compress = compress.replace(';', '; ')\n            compress = compress.replace('{', '{ ')\n            compress = compress.replace(',', ', ')\n\n            css = compress\n\n        return HttpResponse(css, content_type='text/css')\n    finally:\n        f.close()\n\n\ndef img_resize(request, url, width=0, height=0):\n    try:\n        url_ = url[len('media/')+1:] # strip 'media/'\n        image = Image.open(settings.MEDIA_ROOT + url_)\n    except:\n        raise Http404()\n    else:\n        w, h = image.size\n        width = int(width)\n        height = int(height)\n\n        if width and \\\n           width in getattr(settings, 'IMG_ALLOWED_WIDTHS', (width,)):\n            height = int(math.ceil(float(width) * float(h) / float(w)))\n        elif height and \\\n           height in getattr(settings, 'IMG_ALLOWED_HEIGHTS', (height,)):\n            width = int(math.ceil(float(height) * float(w) / float(h)))\n\n        if width and height:\n            image.thumbnail((width, height), Image.ANTIALIAS)\n            draw = ImageDraw.Draw(image)\n            #draw.text((5,5), \"%dx%d\" % (width, height))\n            #draw.text((5,5), datetime.now().strftime('%H:%M:%S'))\n            response = HttpResponse(content_type=\"image/%s\"%image.format)\n            image.save(response, image.format, quality=90)\n            return response\n        else:\n            raise Http404()\n\n\ndef cache_rm(request, path):\n    # http://djangosnippets.org/snippets/936/\n    if cache.has_key(path):\n        cache.delete(path)\n        result = \"DELETED\"\n    else:\n        result = \"NOT FOUND\"\n    return HttpResponse('<h1>%s</h1><h4>%s</h4>' % (result, path))\n\n\ndef memcached_status(request):\n    # http://effbot.org/zone/django-memcached-view.htm\n    try:\n        import memcache\n    except ImportError:\n        raise Http404\n\n    #if not (request.user.is_authenticated() and\n    #        request.user.is_staff):\n    #    raise Http404\n\n    # get first memcached URI\n    m = re.match(\n        \"([.\\w]+:\\d+)\", settings.CACHES['default']['LOCATION'])\n    if not m:\n        raise Http404\n\n    host = memcache._Host(m.group(1))\n    host.connect()\n    host.send_cmd(\"stats\")\n\n    class Stats:\n        pass\n\n    stats = Stats()\n\n    while 1:\n        line = host.readline().split(None, 2)\n        if line[0] == \"END\":\n            break\n        stat, key, value = line\n        try:\n            # convert to native type, if possible\n            value = int(value)\n            if key == \"uptime\":\n                value = datetime.timedelta(seconds=value)\n            elif key == \"time\":\n                value = datetime.datetime.fromtimestamp(value)\n        except ValueError:\n            pass\n        setattr(stats, key, value)\n\n    host.close_socket()\n\n    try:\n        hit_rate = 100 * stats.get_hits / stats.cmd_get\n    except:\n        hit_rate = 0\n\n    return render_to_response(\n        'cache/memcache_status.html', dict(\n            stats=stats,\n            hit_rate=hit_rate,\n            time=datetime.datetime.now(), # server time\n        ))\n\n\n" }, { "alpha_fraction": 0.6227409839630127, "alphanum_fraction": 0.6234939694404602, "avg_line_length": 25.520000457763672, "blob_id": "a94535cd99ee993baca41e2460db18618a7ecb11", "content_id": "40a95f9dc6ab9967eb58299f12b9a920c62f7812", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1328, "license_type": "no_license", "max_line_length": 72, "num_lines": 50, "path": "/urls.py", "repo_name": "lvidarte/django-nerdlabs", "src_encoding": "UTF-8", "text": "\"\"\"\nAuthor: Leo Vidarte <http://nerdlabs.com.ar>\n\nThis is free software,\nyou can redistribute it and/or modify it\nunder the terms of the GPL version 3\nas published by the Free Software Foundation.\n\n\"\"\"\n\nimport os.path\n\nfrom django.conf.urls import patterns, url, include\nfrom django.conf import settings\nfrom django.views.generic import RedirectView\n\nfrom django.contrib import admin\nadmin.autodiscover()\n\n\nurlpatterns = patterns('',\n\n    (r'^admin/doc/', include('django.contrib.admindocs.urls')),\n    (r'^admin/', include(admin.site.urls)),\n\n    (r'^pages/', include('django.contrib.flatpages.urls')),\n    (r'^blog/', include('nerdlabs.blog.urls')),\n    (r'^cache/', include('nerdlabs.cache.urls')),\n\n    (r'^favicon.ico$', RedirectView.as_view(url='/static/favicon.ico')),\n    (r'^robots.txt$', RedirectView.as_view(url='/static/robots.txt')),\n\n    (r'^$', RedirectView.as_view(url='/blog/')),\n\n)\n\n\nif getattr(settings, 'STATIC_SERVER', False):\n    urlpatterns += patterns('',\n        url(r'^media/(?P<path>.*)$',\n            'django.views.static.serve',\n            {'document_root': settings.MEDIA_ROOT},\n            name='blog-media'\n        ),\n        url(r'^static/(?P<path>.*)$',\n            'django.views.static.serve',\n            {'document_root': settings.STATIC_ROOT},\n            name='blog-static'\n        ),\n    )\n\n\n" }, { "alpha_fraction": 0.6934782862663269, "alphanum_fraction": 0.695652186870575, "avg_line_length": 18.360000610351562, "blob_id": "1e22c0ac728c67182ccd55968d96d25d4b241b0d", "content_id": "0ebd5a693854c4f0532d5eed861a2acc99e6e997", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "Python", "length_bytes": 460, "license_type": "no_license", "max_line_length": 47, "num_lines": 25, "path": "/nerdlabs/cache/templatetags/cache.py", "repo_name": "lvidarte/django-nerdlabs", "src_encoding": "UTF-8", "text": "\"\"\"\nAuthor: Leo Vidarte <http://nerdlabs.com.ar>\n\nThis is free software,\nyou can redistribute it and/or modify it\nunder the terms of the GPL version 3\nas published by the Free Software Foundation.\n\n\"\"\"\n\nfrom django import template\n\nregister = template.Library()\n\n## see reverse()\n\n\n@register.filter\ndef wthumb(value, size):\n    return \"/cache/imgs/w/%s%s\" % (size, value)\n\n\n@register.filter\ndef hthumb(value, size):\n    return \"/cache/imgs/h/%s%s\" % (size, value)\n\n" }, { "alpha_fraction": 0.7207207083702087, "alphanum_fraction": 0.7237237095832825, "avg_line_length": 23.785715103149414, "blob_id": "5cf956c423301c2f10410d6d59383829393a7eeb", "content_id": "edb3cdd235c34af6fe9aff41b11e5b8e9def1d21", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "Python", "length_bytes": 333, "license_type": "no_license", "max_line_length": 49, "num_lines": 14, "path": "/nerdlabs/cache/__init__.py", "repo_name": "lvidarte/django-nerdlabs", "src_encoding": "UTF-8", "text": "\"\"\"\nAuthor: Leo Vidarte <http://nerdlabs.com.ar>\n\nThis is free software,\nyou can redistribute it and/or modify it\nunder 
the terms of the GPL version 3\nas published by the Free Software Foundation.\n\n\"\"\"\n\nfrom django.utils.encoding import smart_str\n\ndef make_key(key, key_prefix, version):\n return ':'.join([key_prefix, smart_str(key)])\n" }, { "alpha_fraction": 0.6023529171943665, "alphanum_fraction": 0.6105882525444031, "avg_line_length": 21.36842155456543, "blob_id": "db3636eab8f2c417198a9a70d1170e96f918257b", "content_id": "b3ac84666942e39c0212002342a77517fe61944d", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "Python", "length_bytes": 850, "license_type": "no_license", "max_line_length": 72, "num_lines": 38, "path": "/nerdlabs/cache/urls.py", "repo_name": "lvidarte/django-nerdlabs", "src_encoding": "UTF-8", "text": "\"\"\"\nAuthor: Leo Vidarte <http://nerdlabs.com.ar>\n\nThis is free software,\nyou can redistribute it and/or modify it\nunder the terms of the GPL version 3\nas published by the Free Software Foundation.\n\n\"\"\"\n\nfrom django.conf.urls import patterns, url, include\nfrom nerdlabs.cache.views import *\n\n# patterns(prefix, pattern_description, ...)\n# \n# pattern_description:\n# (regular expression, Python callback function [, dictionary [, name]])\n\n\nurlpatterns = patterns('nerdlabs.cache.views',\n\n (r'^styles/([a-zA-Z_]+)\\.css', parse_dcss_file),\n\n url(r'^imgs/w/(?P<width>\\d{2,3})(?P<url>/.+)$',\n view='img_resize',\n name='cache-imgs-w'\n ),\n\n url(r'^imgs/h/(?P<height>\\d{2,3})(?P<url>/.+)$',\n view='img_resize',\n name='cache-imgs-h'\n ),\n\n (r'^rm(/[-0-9a-zA-Z_/\\.]+)$', cache_rm),\n\n (r'^status', memcached_status),\n\n)\n" }, { "alpha_fraction": 0.6340482831001282, "alphanum_fraction": 0.6461126208305359, "avg_line_length": 23.83333396911621, "blob_id": "1d093129c8b8f05bca567d6571328ed3686a000f", "content_id": "49b1ae26287403244e545427e8577a7c40ae6cc7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 746, "license_type": "no_license", "max_line_length": 67, "num_lines": 30, "path": "/nerdlabs/blog/middleware.py", "repo_name": "lvidarte/django-nerdlabs", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n\"\"\"\nAuthor: Leo Vidarte <http://nerdlabs.com.ar>\n\nThis is free software,\nyou can redistribute it and/or modify it\nunder the terms of the GPL version 3\nas published by the Free Software Foundation.\n\n\"\"\"\n\nfrom django.conf import settings\nfrom django.core.cache import cache\n\n\nclass NginxMemcacheMiddleWare:\n def process_response(self, request, response):\n path = request.get_full_path()\n\n if getattr(settings, 'BLOG_CACHE_ENABLED', False) \\\n and request.method == \"GET\" \\\n and response.status_code == 200:\n\n key = \"blog:%s\" % path\n timeout = getattr(settings, 'BLOG_CACHE_TIMEOUT', 3600)\n\n cache.set(key, response.content, timeout)\n\n return response\n\n" }, { "alpha_fraction": 0.6229507923126221, "alphanum_fraction": 0.6229507923126221, "avg_line_length": 19.33333396911621, "blob_id": "d3705af737ce506d73d9d24a0759d8fa7c3a95b7", "content_id": "d776e8ff079a4bbff48f81a1af8bc2af9d00ba38", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 122, "license_type": "no_license", "max_line_length": 40, "num_lines": 6, "path": "/bin/create_env.sh", "repo_name": "lvidarte/django-nerdlabs", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nvirtualenv env \\\n && source env/bin/activate \\\n && pip install -r requirements.txt \\\n && deactivate\n" }, { "alpha_fraction": 0.5479216575622559, 
"alphanum_fraction": 0.5486087203025818, "avg_line_length": 28.693878173828125, "blob_id": "a1ff6562dbd34b6350df9190fe2f7c86950d3af4", "content_id": "ddf565840710e1213e7ed40b072b155b86894321", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2911, "license_type": "no_license", "max_line_length": 72, "num_lines": 98, "path": "/nerdlabs/blog/admin.py", "repo_name": "lvidarte/django-nerdlabs", "src_encoding": "UTF-8", "text": "\"\"\"\nAuthor: Leo Vidarte <http://nerdlabs.com.ar>\n\nThis is free software,\nyou can redistribute it and/or modify it\nunder the terms of the GPL version 3\nas published by the Free Software Foundation.\n\n\"\"\"\n\nfrom django.contrib import admin\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom nerdlabs.blog.models import Post, PostFile\nfrom nerdlabs.common.models import Tag, File\n\n\nclass TagAdmin(admin.ModelAdmin):\n prepopulated_fields = {'slug': ('title',)}\n\n\nclass FileAdmin(admin.ModelAdmin):\n list_display = ('file', 'thumbnail', 'get_size',\n 'mime', 'created', 'is_image')\n list_filter = ('tags', 'is_image')\n readonly_fields = ('mime', 'size', 'width', 'height', 'is_image',\n 'created', 'modified')\n filter_horizontal = ('tags',)\n fieldsets = (\n (None, {\n 'fields': ('file', 'alt', 'author', 'created', 'modified')\n }),\n (None, {\n 'fields': ('mime', 'size',)\n }),\n (None, {\n 'fields': ('is_image', 'width', 'height')\n }),\n (_('Tags'), {\n 'classes': ('collapse',),\n 'fields': ('tags',),\n }),\n )\n\n def formfield_for_foreignkey(self, db_field, request, **kwargs):\n if db_field.name == 'author':\n kwargs['initial'] = request.user.id\n return db_field.formfield(**kwargs)\n return super(FileAdmin, self).formfield_for_foreignkey(\n db_field, request, **kwargs)\n\n\nclass PostFileInline(admin.TabularInline):\n model = PostFile\n extra = 0\n\n\nclass PostAdmin(admin.ModelAdmin):\n list_display = ('slug', 'title', 'author', 'publish', 'get_status')\n list_filter = ('publish', 'tags', 'status', 'author')\n search_fields = ('title', 'body')\n prepopulated_fields = {'slug': ('title',)}\n inlines = (PostFileInline,)\n filter_horizontal = ('tags',)\n readonly_fields = ('created', 'modified')\n fieldsets = (\n (None, {\n 'fields': ('title', 'slug', 'author', 'body', 'markup')\n }),\n (None, {\n 'fields': ('publish', 'status', 'allow_comments')\n }),\n (None, {\n 'fields': ('created', 'modified')\n }),\n (_('Tags'), {\n 'classes': ('collapse',),\n 'fields': ('tags',),\n }),\n )\n\n def formfield_for_foreignkey(self, db_field, request, **kwargs):\n if db_field.name == 'author':\n kwargs['initial'] = request.user.id\n return db_field.formfield(**kwargs)\n return super(PostAdmin, self).formfield_for_foreignkey(\n db_field, request, **kwargs)\n\n class Media:\n css = {'all': (\n 'http://fonts.googleapis.com/css?family=Inconsolata',\n '/media/css/admin.css',\n )}\n\n\nadmin.site.register(Tag, TagAdmin)\nadmin.site.register(File, FileAdmin)\nadmin.site.register(Post, PostAdmin)\n\n" }, { "alpha_fraction": 0.7370074391365051, "alphanum_fraction": 0.7410051226615906, "avg_line_length": 28.67796516418457, "blob_id": "154aba1ea9634a1f2eac667c8634c6a2255baf44", "content_id": "a2ab50da8a5ea246eb46a586a61c07b15180fad3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 3502, "license_type": "no_license", "max_line_length": 81, "num_lines": 118, "path": "/conf/uwsgi/nerdlabs.ini", "repo_name": "lvidarte/django-nerdlabs", "src_encoding": "UTF-8", 
"text": "[uwsgi]\n\n## Show all options at http://projects.unbit.it/uwsgi/wiki/Doc\n\n## gid\n##\n## Set the gid under the uWSGI server will run.\ngid = xleo\n\n## uid\n##\n## Set the uid under the uWSGI server will run.\nuid = xleo\n\n## pidfile\n##\n## Write the pidfile to the specified file before privileges drop.\npidfile = /tmp/nerdlabs.pid\n\n## socket\n##\n## Specify the path (in case of UNIX socket) or the address (for INET sockets)\n## of the socket where the uwsgi clients (your webserver) will connect to. You\n## can specify up to 8 socket options. When used as a command line arg you can\n## use the '-s' shortcut.\nsocket = /tmp/nerdlabs.sock\n\n## chmod-socket or chmod\n##\n## Unix sockets are filesystem object that must obey to unix permission scheme.\n## You can set unix sockets permissions with this option if your webserver has\n## no access to the uWSGI socket.\n##\n## When used as a flag this option will set permissions to 666, otherwise the\n## spcified scheme will be used.\nchmod = 666\n\n## logto\n##\n## Run the processes in background using a logfile or a udp server.\nlogto = /var/log/uwsgi/app/nerdlabs.log\n\n## vacuum\n##\n## Automatically remove unix socket and pidfiles on server exit.\nvacuum = true\n\n## master\n##\n## Enable the master process.\nmaster = true\n\n## processes or workers\n##\n## Set the number of workers for preforking mode. This is the base for easy\n## and safe concurrency in your app. More workers you add, more concurrent\n## requests you can manage. Each worker correspond to a system process, so it\n## consumes memory, choose carefully the right number. You can easily drop\n## down your system if you set a too high value.\n##\n## When used as a command line arg you can shortcut it with -p\nprocesses = 4\n\n## max-requests\n##\n## Set the maximum number of requests for each worker. When a worker reaches\n## this number it will get recycled. You can use this option to dumb fight\n## memory leaks (even if reload-on-as and reload-on-rss are more useful for\n## this kind of problem).\nmax-requests = 5000\n\n## enable-threads (required by newrelic)\n##\n## By default uWSGI does not enable threading support within the\n## Python interpreter core. This means it is not possible to create background\n## threads from Python code. As the Python agent relies on being able to\n## create background threads, this option is required.\nenable-threads = true\n\n## single-interpreter (required by newrelic)\n##\n## By default uWSGI will execute Python code within a sub interpreter\n## of the process rather than the main Python interpreter created when Python\n## is first initialised. This is done to allow multiple separate Python web\n## applications to be run within the one process but to be sufficiently separated\n## so as to not interfere with each other.\nsingle-interpreter = true\n\n## harakiri\n##\n## This will set the harakiri timeout (look at the wiki home page for a better\n## understanding). 
Every request that will take longer than the seconds\n## specified in the harakiri timeout will be dropped and the corresponding\n## worker recycled.\nharakiri = 60\n\n## home or virtualenv or venv or pyhome [python plugin required]\n##\n## Set the specified virtualenv for python apps.\nhome = /home/xleo/src/lvidarte/django-nerdlabs/env\n\n## dir\n##\n## Move to a specific directory\nchdir = /home/xleo/src/lvidarte/django-nerdlabs\n\n## wsgi file\n##\n## Load the specified python WSGI file\nwsgi-file = wsgi.py\n\n## env\n##\n## Set an environment variable.\nenv = DJANGO_SETTINGS_MODULE=settings\n\n### WSGI plugin for uWSGI\nplugins = python\n" }, { "alpha_fraction": 0.4326569437980652, "alphanum_fraction": 0.43609824776649475, "avg_line_length": 29.089284896850586, "blob_id": "5cf39c3a3eb265b72c36c85141b3d80424b9c9a3", "content_id": "712d023452fd475cb5eab7fcd993309a3b85ed76", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8428, "license_type": "no_license", "max_line_length": 77, "num_lines": 280, "path": "/nerdlabs/common/tools/parser.py", "repo_name": "lvidarte/django-nerdlabs", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n\"\"\"\nAuthor: Leo Vidarte <http://nerdlabs.com.ar>\n\nThis is free software,\nyou can redistribute it and/or modify it\nunder the terms of the GPL version 3\nas published by the Free Software Foundation.\n\n\"\"\"\n\nimport re\n\n\n# Tag types\nSHOW = 1 # [[label|200]]\nLINK = 2 # {{label|text}}\n\n# Markups\nMARKDOWN = 1\nREST = 2 # reStructuredText\nTEXT = 3\n\n\ndef parse_media_tags(id, text, files, markup):\n \"\"\"Parse media tags for easy insert of files into body text.\n\n Show tags:\n [[label_img]] show image\n [[label_img|size]] show thumb\n [[label_img|size|target]] show thumb + link to target\n [[label_pdf]] link to pdf\n [[label_pdf|text]] link to pdf with text\n\n Link tags:\n {{label_img}} link to image\n {{label_file}} link to file\n {{label_file|text}} link to file with text\n\n \"\"\"\n\n # SHOW [[label]]\n for tag in re.findall('\\[\\[[^\\]]+\\]\\]', text):\n ptag = parse_tag(id, tag, files, type=SHOW)\n if ptag:\n src = get_src(ptag, markup)\n if src:\n text = text.replace(tag, src)\n\n # LINK {{label}}\n for tag in re.findall('\\{\\{[^\\}]+\\}\\}', text):\n ptag = parse_tag(id, tag, files, type=LINK)\n if ptag:\n src = get_src(ptag, markup)\n if src:\n text = text.replace(tag, src)\n\n return text\n\n\ndef parse_tag(id, tag, files, type):\n _ = {\n 'obj' : None,\n 'width' : None,\n 'text' : None,\n 'target' : None,\n 'css_class': None,\n 'type' : type\n }\n\n # Split tags into tokens\n if type == SHOW:\n tokens = tag.strip('[] ').split('|')\n _['css_class'] = get_css_class(tag)\n elif type == LINK:\n tokens = tag.strip('{} ').split('|')\n _['css_class'] = None\n else:\n return None\n\n # Avoid empty tags\n lent = len(tokens)\n if lent < 1:\n return None\n\n # Get object which is pointed by label\n _['obj'] = get_object(id, files, tokens[0])\n if not _['obj']:\n return None\n\n # Images\n if _['obj'].file.is_image:\n # Analysis second token\n if lent > 1:\n # Width\n if tokens[1].isdigit() and type==SHOW:\n _['width'] = tokens[1]\n # Just a link\n elif type == LINK:\n _['text'] = tokens[1]\n # Tag \"show\" image that becomes \"link\"\n elif type == SHOW:\n _['text'] = tokens[1]\n _['type'] = LINK\n # Not implemented\n else:\n return None\n # Analysis third token\n if lent > 2:\n if type == SHOW:\n _['target'] = get_object(id, files, tokens[2])\n if not _['target']:\n return 
None\n # Not implemented\n else:\n return None\n # Others\n else:\n # Analysis second token (only text is allowed)\n if lent > 1:\n _['text'] = tokens[1]\n\n return _\n\n\ndef get_css_class(tag):\n # Image align\n if tag[2] == ' ' and tag[-3] == ' ':\n return \"align-center\"\n elif tag[2] == ' ':\n return \"align-right\"\n elif tag[-3] == ' ':\n return \"align-left\"\n else:\n return \"align-none\"\n\n\ndef get_object(id, files, label):\n try:\n object = files.through.objects.get(post=id, label=label)\n except:\n object = None\n return object\n\n\ndef get_src(ptag, markup):\n # Thumb with link to file\n if ptag['target']:\n if markup == MARKDOWN:\n return u''.join((\n '<a href=\"%s\" title=\"%s\">',\n '<img src=\"%s\" alt=\"%s\" class=\"%s\" />',\n '</a>')) % (\n ptag['target'].file.get_absolute_url(),\n ptag['target'].description,\n ptag['obj'].file.get_url_wthumb(ptag['width']),\n ptag['obj'].file.alt,\n ptag['css_class']) \n elif markup == REST:\n return u'\\n'.join((\n '.. image:: %s',\n ' :alt: %s',\n ' :target: %s',\n ' :class: %s')) % (\n ptag['obj'].file.get_url_wthumb(ptag['width']),\n ptag['obj'].file.alt,\n ptag['target'].file.get_absolute_url(),\n ptag['css_class'])\n elif markup == TEXT:\n return ptag['obj'].file.get_absolute_url()\n\n # Thumb\n elif ptag['width']:\n if markup == MARKDOWN:\n return u'<div class=\"%s\"><img src =\"%s\" alt=\"%s\" /></div>' % (\n ptag['css_class'],\n ptag['obj'].file.get_url_wthumb(ptag['width']),\n ptag['obj'].file.alt,\n ) \n elif markup == REST:\n return u'\\n'.join((\n '.. image:: %s',\n ' :alt: %s',\n ' :class: %s')) % (\n ptag['obj'].file.get_url_wthumb(width=ptag['width']),\n ptag['obj'].file.alt,\n ptag['css_class'])\n elif markup == TEXT:\n return ptag['obj'].file.get_url_wthumb(width=ptag['width'])\n\n # Link with text\n elif ptag['text']:\n if markup == MARKDOWN:\n return u'<a href=\"%s\" title=\"%s\">%s</a>' % (\n ptag['obj'].file.get_absolute_url(),\n ptag['obj'].description,\n ptag['text']) \n elif markup == REST:\n return u'`%s <%s>`_' % (\n ptag['text'],\n ptag['obj'].file.get_absolute_url())\n elif markup == TEXT:\n return u'%s <%s>' % (\n ptag['text'],\n ptag['obj'].file.get_absolute_url())\n\n # Image\n elif ptag['obj'].file.is_image and ptag['type'] == SHOW:\n if markup == MARKDOWN:\n return u'<div class=\"%s\"><img src=\"%s\" alt=\"%s\" /></div>' % (\n ptag['css_class'],\n ptag['obj'].file.get_absolute_url(),\n ptag['obj'].file.alt,\n )\n elif markup == REST:\n return u'\\n'.join((\n '.. 
image:: %s',\n ' :alt: %s',\n ' :class: %s')) % (\n ptag['obj'].file.get_absolute_url(),\n ptag['obj'].file.alt,\n ptag['css_class'])\n elif markup == TEXT:\n return ptag['obj'].file.get_absolute_url()\n\n # Link\n else:\n if markup == MARKDOWN:\n return u'<a href=\"%s\" title=\"%s\">%s</a>' % (\n ptag['obj'].file.get_absolute_url(),\n ptag['obj'].description,\n ptag['obj'].file.get_absolute_url()) \n elif markup == REST:\n return u'`%s <%s>`_' % (\n ptag['obj'].file.get_absolute_url(),\n ptag['obj'].file.get_absolute_url())\n elif markup == TEXT:\n return ptag['obj'].file.get_absolute_url()\n\n return None\n\n\ndef markdown_to_html(text):\n try:\n import markdown2\n except ImportError:\n raise Exception(\"Markdown2 is not installed.\")\n style = {\n \"extras\": {\n \"code-friendly\": None,\n },\n \"safe_mode\": False,\n }\n return markdown2.markdown(text, style)\n\n\ndef rest_to_html(rest_src):\n try:\n from docutils import core\n except ImportError:\n raise Exception(\"Docutils is not installed.\")\n parts = core.publish_parts(source=rest_src, writer_name='html')\n return parts['body_pre_docinfo'] + parts['fragment']\n\n\ndef text_to_html(text_src):\n return text_src.replace('\\n', '<br />')\n\n\ndef parse_tag_more(src, stop=False, link='', text_link='more'):\n if stop and link:\n i = src.find('[:more:]')\n if i >= 0:\n src = src[:i] + u\"\"\"<p class=\"read-more\">\n <a href=\"%s#more\" title=\"\">%s</a> »\n </p>\"\"\" % (link, text_link)\n else:\n src = src.replace(u'[:more:]', u'<a id=\"more\"></a>')\n return src\n\n\n" }, { "alpha_fraction": 0.6893523335456848, "alphanum_fraction": 0.693743109703064, "avg_line_length": 22.35897445678711, "blob_id": "d432e43e198dd6d9145a9889dd4762ce48c69808", "content_id": "7ec2188c0d638d0f318f4068b911e74eddfcc5e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 911, "license_type": "no_license", "max_line_length": 49, "num_lines": 39, "path": "/nerdlabs/blog/feeds.py", "repo_name": "lvidarte/django-nerdlabs", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n\"\"\"\nAuthor: Leo Vidarte <http://nerdlabs.com.ar>\n\nThis is free software,\nyou can redistribute it and/or modify it\nunder the terms of the GPL version 3\nas published by the Free Software Foundation.\n\n\"\"\"\n\nfrom django.conf import settings\nfrom django.contrib.syndication.views import Feed\nfrom django.contrib.sites.models import Site\nfrom django.core.urlresolvers import reverse\n\nfrom nerdlabs.blog.models import Post\n\n\nclass PostFeed(Feed):\n _site = Site.objects.get_current()\n title = settings.BLOG_NAME\n description = settings.BLOG_DESCRIPTION\n\n def link(self):\n return reverse('blog-post-list')\n\n def items(self):\n return Post.published.all()[:10]\n\n def item_pubdate(self, item):\n return item.publish\n\n def item_author_name(self, item):\n return '%s' % (item.author)\n\n def item_description(self, item):\n return item.get_body_html()\n" }, { "alpha_fraction": 0.6043083667755127, "alphanum_fraction": 0.6065759658813477, "avg_line_length": 23.5, "blob_id": "8d941caeaf82f2d10d169920cb9c302493fa177f", "content_id": "4e486e30c46a453788b22033ecf8619c78d12208", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 882, "license_type": "no_license", "max_line_length": 77, "num_lines": 36, "path": "/nerdlabs/common/tools/search.py", "repo_name": "lvidarte/django-nerdlabs", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n\"\"\"\nAuthor: Leo 
Vidarte <http://nerdlabs.com.ar>\n\nThis is free software,\nyou can redistribute it and/or modify it\nunder the terms of the GPL version 3\nas published by the Free Software Foundation.\n\n\"\"\"\n\n\nfrom stop_words import STOP_WORDS\ndef strip_stop_words(text):\n return [w for w in text.split() if strip_diacritics(w) not in STOP_WORDS]\n\n\nfrom unicodedata import normalize, category\ndef strip_diacritics(word):\n return ''.join(\n [c for c in normalize('NFD', word) if category(c) == 'Ll'])\n\n\nfrom django.db.models import Q\ndef get_q(words_list, field_name, op='and'):\n if op not in ('and', 'or'):\n raise Exception(\"op must be 'and' or 'or'\")\n r = None\n for w in words_list:\n q = Q(**{field_name + '__icontains': w})\n if r:\n r = r & q if op == 'and' else r | q\n else:\n r = q\n return r\n" }, { "alpha_fraction": 0.516339898109436, "alphanum_fraction": 0.6993464231491089, "avg_line_length": 16, "blob_id": "3235d36508712b0183f3b367258007b76780bf65", "content_id": "2d5fa56f20ddb9639e31f181423799f4b543d499", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 153, "license_type": "no_license", "max_line_length": 27, "num_lines": 9, "path": "/requirements.txt", "repo_name": "lvidarte/django-nerdlabs", "src_encoding": "UTF-8", "text": "Django==1.7.3\nPillow==2.6.1\nPygments==1.5\nargparse==1.2.1\ndistribute==0.6.24\ndocutils==0.9.1\nwsgiref==0.1.2\nmarkdown2==2.3.0\ndjango-markdown-deux==1.0.5\n" }, { "alpha_fraction": 0.6826087236404419, "alphanum_fraction": 0.686956524848938, "avg_line_length": 23.210525512695312, "blob_id": "ee7afdc999cd32851c2c6d5dfb15657e0ac3a4aa", "content_id": "1902bb9b41a73e365e3d667392d32a5d102ead86", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 460, "license_type": "no_license", "max_line_length": 64, "num_lines": 19, "path": "/nerdlabs/blog/managers.py", "repo_name": "lvidarte/django-nerdlabs", "src_encoding": "UTF-8", "text": "\"\"\"\nAuthor: Leo Vidarte <http://nerdlabs.com.ar>\n\nThis is free software,\nyou can redistribute it and/or modify it\nunder the terms of the GPL version 3\nas published by the Free Software Foundation.\n\n\"\"\"\n\nfrom django.db import models\nimport datetime\n\n\nclass PublicManager(models.Manager):\n def get_queryset(self):\n return super(PublicManager, self).get_queryset().filter(\n status__gte=2,\n publish__lte=datetime.datetime.now())\n" }, { "alpha_fraction": 0.5799600481987, "alphanum_fraction": 0.5929535031318665, "avg_line_length": 31.25806427001953, "blob_id": "9076ef6f13f44a189dc7df31e55d15d9a08c0223", "content_id": "183172cc8bcac29ed6e663fdb09ae24fad1e34ef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4002, "license_type": "no_license", "max_line_length": 76, "num_lines": 124, "path": "/nerdlabs/common/models.py", "repo_name": "lvidarte/django-nerdlabs", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n\"\"\"\nAuthor: Leo Vidarte <http://nerdlabs.com.ar>\n\nThis is free software,\nyou can redistribute it and/or modify it\nunder the terms of the GPL version 3\nas published by the Free Software Foundation.\n\n\"\"\"\n\nfrom PIL import Image\nfrom mimetypes import guess_type\n\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.contrib.auth.models import User\n#from django.contrib.sites.models import Site\n\n#current_site = Site.objects.get_current().domain\n\n\nclass Tag(models.Model):\n 
\"\"\"Tag model.\"\"\"\n title = models.CharField(_('title'), max_length=100)\n slug = models.SlugField(_('slug'), unique=True)\n\n class Meta:\n verbose_name = _('tag')\n verbose_name_plural = _('tags')\n db_table = 'nerdlabs_common_tags'\n ordering = ('title',)\n\n def __unicode__(self):\n return u'%s' % self.title\n\n @models.permalink\n def get_absolute_url(self):\n return ('blog-post-list-by-tag', None, {'slug': self.slug})\n\n def get_total_posts(self):\n from nerdlabs.blog.models import Post\n return Post.objects.filter(tags__id=self.id).count()\n\n\nclass File(models.Model):\n file = models.FileField(_('file'),\n upload_to='%Y/%m/%d', max_length=512)\n alt = models.CharField(_('alt'), max_length=256, blank=True)\n size = models.IntegerField(_('size'), blank=True, default=0)\n mime = models.CharField(_('mimetype'), max_length=256, blank=True)\n width = models.IntegerField(_('width'), blank=True, default=0)\n height = models.IntegerField(_('height'), blank=True, default=0)\n is_image = models.BooleanField(_('is image'), blank=True, default=False)\n created = models.DateTimeField(_('created'), auto_now_add=True)\n modified = models.DateTimeField(_('modified'), auto_now=True)\n author = models.ForeignKey(User, blank=True, null=True)\n tags = models.ManyToManyField(Tag, blank=True)\n\n class Meta:\n verbose_name = _('file')\n verbose_name_plural = _('files')\n db_table = 'nerdlabs_common_files'\n ordering = ('-created',)\n get_latest_by = 'created'\n\n #@models.permalink\n def get_absolute_url(self):\n #return ''.join(['http://', current_site, self.file.url])\n return self.file.url\n\n @models.permalink\n def get_url_wthumb(self, width):\n return ('cache-imgs-w', None, \n {'width': width, 'url': self.get_absolute_url()})\n\n @models.permalink\n def get_url_hthumb(self, height):\n return ('cache-imgs-h', None, \n {'height': height, 'url': self.get_absolute_url()})\n\n def thumbnail(self):\n if self.is_image:\n return u'<img src=\"%s\" alt=\"%s\" />' % (\n self.get_url_hthumb(75), self.alt)\n else:\n return u''\n\n thumbnail.short_description = _('thumbnail')\n thumbnail.allow_tags = True\n\n def get_size(self):\n if self.size < 1024: # 1Kib\n return \"%s bytes\" % self.size\n elif self.size < 1048576: # < 1Mib\n return \"%s Kib\" % round(self.size / float(1024), 1)\n else: # >= 1Mib\n return \"%s Mib\" % round(self.size / float(1048576), 1)\n\n get_size.short_description = _('size')\n get_size.allow_tags = True\n get_size.admin_order_field = 'size'\n\n def save(self, force_insert=False, force_update=False):\n setattr(self, 'size', self.file.size) \n mime = guess_type(self.file.name)[0]\n setattr(self, 'mime', mime) \n\n if mime.split('/')[0] == 'image':\n setattr(self, 'is_image', 1) \n img = Image.open(self.file)\n width, height = img.size\n setattr(self, 'width', width) \n setattr(self, 'height', height) \n else:\n setattr(self, 'is_image', 0) \n setattr(self, 'width', 0) \n setattr(self, 'height', 0) \n\n super(File, self).save(force_insert, force_update)\n\n def __unicode__(self):\n return u'%s' % self.file.name\n\n\n" }, { "alpha_fraction": 0.5886031985282898, "alphanum_fraction": 0.5923094749450684, "avg_line_length": 31.4436092376709, "blob_id": "1464d3810a451a4ab9431b71e5f75048e6c46f93", "content_id": "1659944a37e409272479b7295744d7652658aa67", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4317, "license_type": "no_license", "max_line_length": 78, "num_lines": 133, "path": "/nerdlabs/blog/models.py", "repo_name": 
"lvidarte/django-nerdlabs", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n\"\"\"\nAuthor: Leo Vidarte <http://nerdlabs.com.ar>\n\nThis is free software,\nyou can redistribute it and/or modify it\nunder the terms of the GPL version 3\nas published by the Free Software Foundation.\n\n\"\"\"\n\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.contrib.auth.models import User\n\nfrom nerdlabs.blog.managers import PublicManager\nfrom nerdlabs.common.models import Tag, File\nfrom nerdlabs.common.tools import parser\n\nimport datetime\n\nDRAFT_STATUS = 1\nPUBLIC_STATUS = 2\n\nSTATUS_CHOICES = (\n (DRAFT_STATUS, _('Draft')),\n (PUBLIC_STATUS, _('Public')),\n)\n\nMARKDOWN_MARKUP = 1\nREST_MARKUP = 2\nTEXT_MARKUP = 3\n\nMARKUP_CHOICES = (\n (MARKDOWN_MARKUP, _('Markdown')),\n (REST_MARKUP, _('reStructuredText')),\n (TEXT_MARKUP, _('Text')),\n)\n\n\nclass PostFile(models.Model):\n post = models.ForeignKey('Post')\n file = models.ForeignKey(File)\n label = models.CharField(_('label'), max_length=128, blank=True)\n\n class Meta:\n verbose_name = _('post file')\n verbose_name_plural = _('post files')\n db_table = 'nerdlabs_blog_post_file'\n\n\nclass Post(models.Model):\n \"\"\"Post model.\"\"\"\n title = models.CharField(_('title'), max_length=200)\n slug = models.SlugField(_('slug'), unique_for_date='publish')\n author = models.ForeignKey(User, blank=True, null=True)\n body = models.TextField(_('body'), help_text=_(\"\"\"\n Media tags images: [[label]] [[label|size]]\n Links: {{label}} {{label|text}}\n Read more: [:more:]\"\"\"))\n markup = models.IntegerField(_('markup'), choices=MARKUP_CHOICES,\n default=MARKDOWN_MARKUP)\n status = models.IntegerField(_('status'),\n choices=STATUS_CHOICES, default=DRAFT_STATUS)\n allow_comments = models.BooleanField(_('allow comments'), default=True)\n publish = models.DateTimeField(_('publish'),\n default=datetime.datetime.now)\n created = models.DateTimeField(_('created'), auto_now_add=True)\n modified = models.DateTimeField(_('modified'), auto_now=True)\n tags = models.ManyToManyField(Tag, blank=True)\n files = models.ManyToManyField(File, blank=True, through='PostFile')\n\n objects = models.Manager()\n published = PublicManager()\n\n class Meta:\n verbose_name = _('post')\n verbose_name_plural = _('posts')\n db_table = 'nerdlabs_blog_posts'\n ordering = ('-publish',)\n get_latest_by = 'publish'\n\n def __unicode__(self):\n return u'%s' % self.title\n\n def get_status(self):\n return True if self.status == 2 else False\n\n get_status.short_description = _('status')\n get_status.boolean = True\n get_status.admin_order_field = 'status'\n\n @models.permalink\n def get_absolute_url(self):\n return ('blog-post-detail', None, {\n 'year': self.publish.year,\n 'month': self.publish.month,\n 'day': self.publish.day,\n 'slug': self.slug\n })\n\n def get_body_html(self, less=False):\n body = parser.parse_media_tags(self.id, self.body,\n self.files, self.markup)\n\n if self.markup == MARKDOWN_MARKUP:\n body = parser.markdown_to_html(body)\n elif self.markup == REST_MARKUP:\n body = parser.rest_to_html(body)\n elif self.markup == TEXT_MARKUP:\n body = parser.text_to_html(body) \n\n if less:\n return parser.parse_tag_more(body, stop=True,\n link=self.get_absolute_url(),\n text_link=_('Read more'))\n else:\n return parser.parse_tag_more(body)\n\n def get_body_html_less(self):\n return self.get_body_html(less=True)\n\n def get_next_post(self):\n # @See django.db.models.Model.get_next_by_FOO\n if getattr(self, 
'_next_post', False) == False:\n self._next_post = self.get_next_by_publish(status__gte=2)\n return self._next_post\n\n def get_previous_post(self):\n if getattr(self, '_previous_post', False) == False:\n self._previous_post = self.get_previous_by_publish(status__gte=2)\n return self._previous_post\n\n\n" }, { "alpha_fraction": 0.6436781883239746, "alphanum_fraction": 0.6781609058380127, "avg_line_length": 18.33333396911621, "blob_id": "e8505825665bf95586bdf7f816af0d01137efd84", "content_id": "4e0100e84cae3346e9b5779d1f467b06f9590c80", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 174, "license_type": "no_license", "max_line_length": 45, "num_lines": 9, "path": "/bin/serve.sh", "repo_name": "lvidarte/django-nerdlabs", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nADDR=${1:-localhost}\nPORT=${2:-8000}\n\necho \"Running server on $ADDR:$PORT ..\"\nsource env/bin/activate \\\n && python manage.py runserver $ADDR:$PORT\ndeactivate\n" }, { "alpha_fraction": 0.6023856997489929, "alphanum_fraction": 0.6073558926582336, "avg_line_length": 27.685714721679688, "blob_id": "78a4e8ba5e2774994168f385a4890b9b83da9079", "content_id": "c4b415477cc06b5183abaf683438f6b00ceeaf12", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "Python", "length_bytes": 1006, "license_type": "no_license", "max_line_length": 69, "num_lines": 35, "path": "/nerdlabs/cache/middleware.py", "repo_name": "lvidarte/django-nerdlabs", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n\"\"\"\nAuthor: Leo Vidarte <http://nerdlabs.com.ar>\n\nThis is free software,\nyou can redistribute it and/or modify it\nunder the terms of the GPL version 3\nas published by the Free Software Foundation.\n\n\"\"\"\n\nfrom django.conf import settings\nfrom django.core.cache import cache\n\n\nclass NginxMemcacheMiddleware:\n def process_response(self, request, response):\n path = request.get_full_path()\n\n if getattr(settings, 'CACHE_ENABLED', False) \\\n and request.method == \"GET\" \\\n and response.status_code == 200 \\\n and not request.user.is_authenticated():\n\n #from nerdlabs.cache import make_key\n #from datetime import datetime\n #key = make_key(path, 'nerdlabs', '')\n #action = \"found\" if cache.get(key) else \"not found\"\n #f = open('/tmp/cache.log', 'a')\n #f.write(\"[%s] %s: %s\\n\" % (datetime.now(), action, key))\n\n cache.set(path, response.content)\n\n return response\n\n\n" }, { "alpha_fraction": 0.4137779772281647, "alphanum_fraction": 0.4139973819255829, "avg_line_length": 12.485207557678223, "blob_id": "dd98a76ba9f724500d31dc50bab1afdbd419ef47", "content_id": "f7b9d7d34e133034274f3bfa080e3462887b90a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4558, "license_type": "no_license", "max_line_length": 45, "num_lines": 338, "path": "/nerdlabs/common/tools/stop_words.py", "repo_name": "lvidarte/django-nerdlabs", "src_encoding": "UTF-8", "text": "\"\"\"\nAuthor: Leo Vidarte <http://nerdlabs.com.ar>\n\nThis is free software,\nyou can redistribute it and/or modify it\nunder the terms of the GPL version 3\nas published by the Free Software Foundation.\n\n\"\"\"\n\nSTOP_WORDS = (\n 'a',\n 'aca',\n 'ahi',\n 'ajena',\n 'ajenas',\n 'ajeno',\n 'ajenos',\n 'al',\n 'algo',\n 'algun',\n 'alguna',\n 'algunas',\n 'alguno',\n 'algunos',\n 'alla',\n 'alli',\n 'ambos',\n 'ampleamos',\n 'ante',\n 'antes',\n 'aquel',\n 'aquella',\n 'aquellas',\n 'aquello',\n 'aquellos',\n 'aqui',\n 
'arriba',\n 'asi',\n 'atras',\n 'aun',\n 'aunque',\n 'bajo',\n 'bastante',\n 'bien',\n 'cabe',\n 'cada',\n 'casi',\n 'cierta',\n 'ciertas',\n 'cierto',\n 'ciertos',\n 'como',\n 'con',\n 'conmigo',\n 'conseguimos',\n 'conseguir',\n 'consigo',\n 'consigue',\n 'consiguen',\n 'consigues',\n 'contigo',\n 'contra',\n 'cual',\n 'cuales',\n 'cualquier',\n 'cualquiera',\n 'cualquieras',\n 'cuan',\n 'cuando',\n 'cuanta',\n 'cuantas',\n 'cuanto',\n 'cuantos',\n 'de',\n 'dejar',\n 'del',\n 'demas',\n 'demasiada',\n 'demasiadas',\n 'demasiado',\n 'demasiados',\n 'dentro',\n 'desde',\n 'donde',\n 'dos',\n 'el',\n 'ella',\n 'ellas',\n 'ello',\n 'ellos',\n 'empleais',\n 'emplean',\n 'emplear',\n 'empleas',\n 'empleo',\n 'en',\n 'encima',\n 'entonces',\n 'entre',\n 'era',\n 'eramos',\n 'eran',\n 'eras',\n 'eres',\n 'es',\n 'esa',\n 'esas',\n 'ese',\n 'eso',\n 'esos',\n 'esta',\n 'estaba',\n 'estado',\n 'estais',\n 'estamos',\n 'estan',\n 'estar',\n 'estas',\n 'este',\n 'esto',\n 'estos',\n 'estoy',\n 'etc',\n 'fin',\n 'fue',\n 'fueron',\n 'fui',\n 'fuimos',\n 'gueno',\n 'ha',\n 'hace',\n 'haceis',\n 'hacemos',\n 'hacen',\n 'hacer',\n 'haces',\n 'hacia',\n 'hago',\n 'hasta',\n 'incluso',\n 'intenta',\n 'intentais',\n 'intentamos',\n 'intentan',\n 'intentar',\n 'intentas',\n 'intento',\n 'ir',\n 'jamas',\n 'junto',\n 'juntos',\n 'la',\n 'largo',\n 'las',\n 'lo',\n 'los',\n 'mas',\n 'me',\n 'menos',\n 'mi',\n 'mia',\n 'mias',\n 'mientras',\n 'mio',\n 'mios',\n 'mis',\n 'misma',\n 'mismas',\n 'mismo',\n 'mismos',\n 'modo',\n 'mucha',\n 'muchas',\n 'muchisima',\n 'muchisimas',\n 'muchisimo',\n 'muchisimos',\n 'mucho',\n 'muchos',\n 'muy',\n 'nada',\n 'ni',\n 'ningun',\n 'ninguna',\n 'ningunas',\n 'ninguno',\n 'ningunos',\n 'no',\n 'nos',\n 'nosotras',\n 'nosotros',\n 'nuestra',\n 'nuestras',\n 'nuestro',\n 'nuestros',\n 'nunca',\n 'os',\n 'otra',\n 'otras',\n 'otro',\n 'otros',\n 'para',\n 'parecer',\n 'pero',\n 'poca',\n 'pocas',\n 'poco',\n 'pocos',\n 'podeis',\n 'podemos',\n 'poder',\n 'podria',\n 'podriais',\n 'podriamos',\n 'podrian',\n 'podrias',\n 'por',\n 'por que',\n 'porque',\n 'primero',\n 'primero desde',\n 'puede',\n 'pueden',\n 'puedo',\n 'pues',\n 'que',\n 'querer',\n 'quien',\n 'quienes',\n 'quienesquiera',\n 'quienquiera',\n 'quiza',\n 'quizas',\n 'sabe',\n 'sabeis',\n 'sabemos',\n 'saben',\n 'saber',\n 'sabes',\n 'se',\n 'segun',\n 'ser',\n 'si',\n 'siempre',\n 'siendo',\n 'sin',\n 'sino',\n 'so',\n 'sobre',\n 'sois',\n 'solamente',\n 'solo',\n 'somos',\n 'soy',\n 'sr',\n 'sra',\n 'sres',\n 'sta',\n 'su',\n 'sus',\n 'suya',\n 'suyas',\n 'suyo',\n 'suyos',\n 'tal',\n 'tales',\n 'tambien',\n 'tambien',\n 'tampoco',\n 'tan',\n 'tanta',\n 'tantas',\n 'tanto',\n 'tantos',\n 'te',\n 'teneis',\n 'tenemos',\n 'tener',\n 'tengo',\n 'ti',\n 'tiempo',\n 'tiene',\n 'tienen',\n 'toda',\n 'todas',\n 'todo',\n 'todos',\n 'tomar',\n 'trabaja',\n 'trabajais',\n 'trabajamos',\n 'trabajan',\n 'trabajar',\n 'trabajas',\n 'trabajo',\n 'tras',\n 'tu',\n 'tus',\n 'tuya',\n 'tuyo',\n 'tuyos',\n 'ultimo',\n 'un',\n 'una',\n 'unas',\n 'uno',\n 'unos',\n 'usa',\n 'usais',\n 'usamos',\n 'usan',\n 'usar',\n 'usas',\n 'uso',\n 'usted',\n 'ustedes',\n 'va',\n 'vais',\n 'valor',\n 'vamos',\n 'van',\n 'varias',\n 'varios',\n 'vaya',\n 'verdad',\n 'verdadera',\n 'vosotras',\n 'vosotros',\n 'voy',\n 'vuestra',\n 'vuestras',\n 'vuestro',\n 'vuestros',\n 'y',\n 'ya',\n 'yo',\n)\n" }, { "alpha_fraction": 0.6180790662765503, "alphanum_fraction": 0.6214689016342163, "avg_line_length": 29.86046600341797, "blob_id": 
"bffe884766b4c154594fa8578cede58f91bb21f6", "content_id": "be55a4a33afb336cbca800e767496524dac33671", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2655, "license_type": "no_license", "max_line_length": 73, "num_lines": 86, "path": "/nerdlabs/blog/views.py", "repo_name": "lvidarte/django-nerdlabs", "src_encoding": "UTF-8", "text": "\"\"\"\nAuthor: Leo Vidarte <http://nerdlabs.com.ar>\n\nThis is free software,\nyou can redistribute it and/or modify it\nunder the terms of the GPL version 3\nas published by the Free Software Foundation.\n\n\"\"\"\n\nfrom django.shortcuts import render, get_object_or_404\nfrom django.conf import settings\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n\nfrom nerdlabs.blog.models import Post\nfrom nerdlabs.common.models import Tag\nfrom nerdlabs.common.tools import search\n\n\ndef post_list(request):\n posts_all = Post.published.all()\n paginator = Paginator(posts_all, settings.BLOG_PAGESIZE)\n page = request.GET.get('page')\n try:\n posts = paginator.page(page)\n except PageNotAnInteger:\n posts = paginator.page(1)\n except EmptyPage:\n posts = paginator.page(paginator.num_pages)\n return render(request, 'blog/post_list.html', {'object_list': posts})\n\n\ndef post_detail(request, year, month, day, slug):\n if request.user.is_superuser:\n manager = Post.objects\n else:\n manager = Post.published\n post = get_object_or_404(manager,\n publish__year=year,\n publish__month=month,\n publish__day=day,\n slug=slug)\n return render(request, 'blog/post_detail.html', {'object': post})\n\n\ndef post_archive(request):\n posts = Post.published.order_by('-publish')\n return render(request, 'blog/post_archive.html', {'posts': posts})\n\n\ndef post_search(request):\n q = \"\"\n post_list = []\n word_list = []\n if request.GET:\n q = request.GET['q']\n word_list = search.strip_stop_words(q)\n if word_list:\n post_list = Post.published.filter(\n search.get_q(word_list, 'body', 'and') |\n search.get_q(word_list, 'title', 'and'))\n return render(request, 'blog/post_search.html', {\n 'search_term': q,\n 'object_list': post_list,\n 'word_list': word_list})\n\n\ndef post_list_by_tag(request, slug):\n tag = Tag.objects.get(slug=slug)\n posts_all = Post.published.filter(tags__id=tag.id)\n paginator = Paginator(posts_all, settings.BLOG_PAGESIZE)\n page = request.GET.get('page')\n try:\n posts = paginator.page(page)\n except PageNotAnInteger:\n posts = paginator.page(1)\n except EmptyPage:\n posts = paginator.page(paginator.num_pages)\n return render(request, 'blog/post_list.html', {\n 'object_list': posts,\n 'tag': tag})\n\n\ndef tag_cloud(request):\n tags = Tag.objects.all()\n return render(request, 'blog/tag_cloud.html', {'tags': tags})\n\n" }, { "alpha_fraction": 0.5869336128234863, "alphanum_fraction": 0.5879873633384705, "avg_line_length": 22.725000381469727, "blob_id": "5ed6449773c5b9b62f76a82e4c271d4c29cb1e7d", "content_id": "a2339dc9d5045c392673fe7d1013554b59ce5ee7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 949, "license_type": "no_license", "max_line_length": 73, "num_lines": 40, "path": "/nerdlabs/blog/urls.py", "repo_name": "lvidarte/django-nerdlabs", "src_encoding": "UTF-8", "text": "\"\"\"\nAuthor: Leo Vidarte <http://nerdlabs.com.ar>\n\nThis is free software,\nyou can redistribute it and/or modify it\nunder the terms of the GPL version 3\nas published by the Free Software Foundation.\n\n\"\"\"\n\nfrom django.conf.urls 
import patterns, url, include\n\nfrom nerdlabs.blog.feeds import PostFeed\nfrom nerdlabs.blog.models import Post\n\n\nurlpatterns = patterns('nerdlabs.blog.views',\n\n url(r'^(?P<year>\\d+)/(?P<month>\\d+)/(?P<day>\\d+)/(?P<slug>[-\\w]+)/$',\n view='post_detail',\n name='blog-post-detail'\n ),\n\n (r'^archive/', 'post_archive', {}, 'blog-post-archive'),\n (r'^feeds/$', PostFeed(), {}, 'blog-feeds'),\n\n url(r'^search/$',\n view='post_search',\n name='blog-post-search'\n ),\n\n url(r'^tags/(?P<slug>[-\\w]+)/$',\n view='post_list_by_tag',\n name='blog-post-list-by-tag'\n ),\n\n (r'^tags/', 'tag_cloud', {}, 'blog-tag-cloud'),\n\n url(r'^$', view='post_list', name='blog-post-list'),\n)\n" } ]
24
ThermalSense/Assessment-Index-Modelling
https://github.com/ThermalSense/Assessment-Index-Modelling
7289e8e8831021f8bb2a08a88390307098c140c4
1e159924d898e7f443451846bbc1eb100592dcee
ebc71013dedf20733c0834fcc28109ad5fa2d7b1
refs/heads/master
2023-07-08T19:12:05.642781
2021-08-09T09:01:42
2021-08-09T09:01:42
394,221,081
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5740832686424255, "alphanum_fraction": 0.582011878490448, "avg_line_length": 45.40229797363281, "blob_id": "21a167d7a779683bface237ba159f49c81a006cc", "content_id": "d00b11173a871f86171d0149e117f00fe4757205", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4036, "license_type": "no_license", "max_line_length": 115, "num_lines": 87, "path": "/index_model/index.py", "repo_name": "ThermalSense/Assessment-Index-Modelling", "src_encoding": "UTF-8", "text": "import csv\nimport datetime as dt\nimport pathlib\n\nimport pandas as pd\n\nclass IndexModel:\n    def __init__(self) -> None:\n        # starting level of the index\n        self.level = 100\n        self.start_date = None\n        self.end_date = None\n        self.composition = []\n        self.portfolio_weight = []\n        self.index_level_dict = {}\n        path = pathlib.Path('data_sources/stock_prices.csv')\n        self.stocks = pd.read_csv(path)\n\n    def is_start_of_month(self, index: int, column: int) -> bool:\n        return self.stocks.iloc[index, column].strftime(\"%m\") != self.stocks.iloc[index - 1, column].strftime(\"%m\")\n\n    def calculate_composition(self, index: int):\n        composition = []\n        for df_index, stock_price in enumerate(self.stocks.iloc[index - 1, 1:11]):\n            composition.append([stock_price, df_index])\n        composition.sort(reverse=True)\n        return [composition[0][1], composition[1][1], composition[2][1]]\n\n    def determine_portfolio_allocation(self, index: int):\n        stock_volume_one = None\n        stock_volume_two = None\n        stock_volume_three = None\n        highest_market_cap = 0.5 * self.level\n        second_to_third_cap = 0.25 * self.level\n        for i, column in enumerate(self.composition):\n            # selecting the proper columns of the dataframe; column 'Date' is not yet accounted for\n            column = column + 1\n            if i == 0:\n                stock_volume_one = highest_market_cap / self.stocks.iloc[index, column]\n            if i == 1:\n                stock_volume_two = second_to_third_cap / self.stocks.iloc[index, column]\n            if i == 2:\n                stock_volume_three = second_to_third_cap / self.stocks.iloc[index, column]\n        return [stock_volume_one, stock_volume_two, stock_volume_three]\n\n    def calc_index_level(self, start_date: dt.date, end_date: dt.date) -> None:\n        self.stocks['Date'] = pd.to_datetime(self.stocks['Date'], format=(\n            '%d/%m/%Y')) # converts the date from string format to datetime format.\n        self.start_date = start_date.strftime('%d/%m/%Y')\n        self.end_date = end_date.strftime('%d/%m/%Y')\n        for index in self.stocks.index:\n            index_calc_value = 0\n            date = self.stocks.at[index, 'Date'].strftime('%d/%m/%Y')\n            # select column of 'Date'. In this program at Pos. 1, therefore 0.\n            if self.is_start_of_month(index, 0) and len(self.composition) > 0:\n                for list_pos, volume in enumerate(self.portfolio_weight):\n                    index_calc_value += volume * self.stocks.iloc[index, self.composition[list_pos] + 1]\n                self.composition = self.calculate_composition(index)\n                self.portfolio_weight = self.determine_portfolio_allocation(index)\n                self.level = index_calc_value\n                self.index_level_dict[date] = self.level\n\n            if self.is_start_of_month(index, 0):\n                self.composition = self.calculate_composition(index)\n                self.portfolio_weight = self.determine_portfolio_allocation(index)\n\n            if len(self.composition) < 1:\n                continue\n            for list_pos, volume in enumerate(self.portfolio_weight):\n                index_calc_value += volume * self.stocks.iloc[index, self.composition[list_pos] + 1]\n            self.level = index_calc_value\n            if self.index_level_dict.get(date) is not None:\n                continue\n            self.index_level_dict[date] = self.level\n\n    def export_values(self, file_name: str):\n        with open(file_name, 'w', newline='') as file:\n            writer = csv.writer(file)\n            writer.writerow(['Date', 'index_level'])\n            found_start_date = False\n            for date, index_level in self.index_level_dict.items():\n                if date == self.start_date:\n                    found_start_date = True\n                if found_start_date:\n                    writer.writerow([date, index_level])\n                if date == self.end_date:\n                    break" } ]
1
jfmalloy1/CitationNetworks
https://github.com/jfmalloy1/CitationNetworks
4ef16901e4895fd8c19de97e8bd32bc3d6bffbd5
60d72541bd72dfcc6ffe823e727f360b2743c61f
4a4e4a54f75828ad0da491dad83dfadc738901d3
refs/heads/master
2023-05-22T03:54:08.866275
2021-06-11T15:33:42
2021-06-11T15:33:42
266,176,611
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6250588893890381, "alphanum_fraction": 0.6292981505393982, "avg_line_length": 31.66153907775879, "blob_id": "20d3d1cc087369d630b689337c25e5dcb8a1a79d", "content_id": "f97b83c884d5007f7f4750e565afb92746309afa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2123, "license_type": "no_license", "max_line_length": 107, "num_lines": 65, "path": "/pubmed_download.py", "repo_name": "jfmalloy1/CitationNetworks", "src_encoding": "UTF-8", "text": "from selenium import webdriver\nfrom selenium.webdriver.support.ui import Select\nfrom selenium.webdriver.chrome.options import Options\nimport os\nimport glob\nimport time\n\ndef download_csv(cpd_id):\n #Set up downloading options\n options = Options()\n options.add_experimental_option(\"prefs\", {\n \"download.default_directory\": os.getcwd(),\n \"download.prompt_for_download\": False,\n \"download.directory_upgrade\": True,\n \"safebrowsing.enabled\": True\n })\n try:\n url = \"https://www.ncbi.nlm.nih.gov/pubmed?LinkName=pccompound_pubmed_mesh&from_uid=\" + cpd_id\n driver = webdriver.Chrome(\"../chromedriver\", chrome_options=options)\n driver.get(url)\n\n #Find the save button - click to open it (may be completely unnecessary)\n file_button = driver.find_element_by_id(\"save-results-panel-trigger\")\n file_button.click()\n\n #Select the \"all\" option\n select_selection = Select(driver.find_element_by_id(\"save-action-selection\"))\n select_selection.select_by_index(1)\n\n #Save file as csv\n select_format = Select(driver.find_element_by_id(\"save-action-format\"))\n select_format.select_by_value(\"csv\")\n\n #Actually download it\n save_button = driver.find_element_by_xpath(\"//form[@id='save-action-panel-form']/div[3]/button[1]\")\n save_button.click()\n\n #Need time for ~40k citations - 20 seconds seems appropriate\n time.sleep(20)\n except:\n print(cpd_id + \" Failed\")\n\n #Quit chrome\n driver.quit()\n\ndef change_filename(cpd_id):\n latest_file = max(glob.glob(os.getcwd() + \"/*\"), key=os.path.getctime)\n os.rename(latest_file, os.getcwd() + \"/\" + cpd_id + \".csv\")\n\ndef main():\n cpd_ids = []\n with open(\"drugs/opioid_pubchem_ids.txt\", \"r\") as f:\n for line in f:\n if line != \"\":\n cpd_ids.append(line.strip())\n\n for id in cpd_ids:\n #Download csv, given PubChem ID\n download_csv(id)\n\n #Change name of most recently downloaded file to match cpd_id\n change_filename(id)\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.5523602962493896, "alphanum_fraction": 0.5642973184585571, "avg_line_length": 37.39583206176758, "blob_id": "f53c9d29830ce0c25313ebe5883820da3c90c183", "content_id": "a724bf73580108527e111c08af1b6602664b28c7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1843, "license_type": "no_license", "max_line_length": 111, "num_lines": 48, "path": "/overlapping_papers.py", "repo_name": "jfmalloy1/CitationNetworks", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport os\nimport tqdm as tqdm\n\n\"\"\" Checks all downloaded papers (besides self) for overlaps\n Input: dataframe of a specific drug, filename (to prevent overlapping)\n Output: overlapping papers\n\"\"\"\ndef check_all_papers(df0, f0, id_link_df):\n pmids = df0[\"PMID\"].tolist() #pubmed ids of the drug to be checked\n\n overlapping_papers_dict = {} #dictionary to see overlap\n for f in os.listdir(\"drugs/\"):\n if f[0].isdigit() and f != f0:\n #Name of the drug currently being checked against\n 
drug_name = id_link_df[id_link_df[\"ID\"] == int(f[:-4])][\"Name\"].item()\n df = pd.read_csv(\"drugs/\" + f)\n if \"PMID\" in df.columns: #make sure the drug has papers associated with it\n for id in pmids: #for each paper id in the drug to be checked...\n if id in df[\"PMID\"]: #If it is found in the drug to be checked against add to a dictionary!\n if drug_name not in overlapping_papers_dict.keys():\n #start a new index if needed\n overlapping_papers_dict[drug_name] = 1\n else:\n #Otherwise add one to an existing index\n overlapping_papers_dict[drug_name] += 1\n\n\n print(id_link_df[id_link_df[\"ID\"] == int(f0[:-4])][\"Name\"].item())\n print(len(df0))\n print(overlapping_papers_dict)\n print()\n\ndef main():\n id_link_df = pd.read_csv(\"drugs/opioid_pubchem_ids.csv\")\n id_link_df = id_link_df.dropna()\n\n # test_df = pd.read_csv(\"drugs/41049.csv\")\n # check_all_papers(test_df, \"41049.csv\")\n\n for f in os.listdir(\"drugs/\"):\n if f[0].isdigit():\n df = pd.read_csv(\"drugs/\" + f)\n check_all_papers(df, f, id_link_df)\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.8166666626930237, "alphanum_fraction": 0.8166666626930237, "avg_line_length": 23, "blob_id": "b436bad812a6ee875c0ac815434caf31e4f245b9", "content_id": "97b872626a8c52f7607b8239e92d90e1d0f104ce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 120, "license_type": "no_license", "max_line_length": 55, "num_lines": 5, "path": "/README.md", "repo_name": "jfmalloy1/CitationNetworks", "src_encoding": "UTF-8", "text": "# CitationNetworks\nBuild academic citation networks\n\n## Purpose\nScrape PubChem (and other databases) for drug citations\n" }, { "alpha_fraction": 0.5532445907592773, "alphanum_fraction": 0.5740432739257812, "avg_line_length": 29.820512771606445, "blob_id": "d132e6f08a49558936eef5192fab8d78c46f6066", "content_id": "34bacfcf734e40384e5566efeb0b7c6b51a3bfa9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1202, "license_type": "no_license", "max_line_length": 103, "num_lines": 39, "path": "/wos_scrape.py", "repo_name": "jfmalloy1/CitationNetworks", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport math\n\n\"\"\" Make a txt file of 1) all DOIs separated by \"OR\" 2) number of DOIs found \"\"\"\n\"\"\" Input: dataframe of PubChem metadata for a specific drug, index of \"\"\"\n\"\"\" Output: a txt file \"\"\"\ndef create_search(df, label, i):\n df = df.dropna(subset=[\"DOI\"])\n\n search_term = \"\"\n for index, row in df.iterrows():\n if index != len(df):\n search_term += str(row[\"DOI\"]) + \" OR \"\n else:\n search_term += str(row[\"DOI\"])\n\n print(search_term, file=open(\"Searches/\" + label + \"_search_\" + str(i) + \".txt\", \"w\"))\n\ndef main():\n #TEST: read in single opioid citation file\n label = \"3345\"\n df = pd.read_csv(\"drugs/\" + label + \".csv\")\n size = len(df)\n print(size)\n\n #Get the number of different search terms need to be created - 6000 terms is the max WoS can handle\n searches = math.ceil(size/6000)\n\n for i in range(1, searches+1):\n #Output a text file with all DOIs separated by \"OR\": do this in groups of 6000 or less\n if size < len(df):\n create_search(df, label, i)\n else:\n print(i)\n create_search(df[1*i:6000*i], label, i)\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.6709061861038208, "alphanum_fraction": 0.68044513463974, "avg_line_length": 26.34782600402832, "blob_id": 
"84e51c244a2e3af9edc1793c69734cb0b70ac955", "content_id": "101ea9fc1d4cf7d44960fb8c8ab1c29fc54bcbcc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 629, "license_type": "no_license", "max_line_length": 92, "num_lines": 23, "path": "/test_scraping.py", "repo_name": "jfmalloy1/CitationNetworks", "src_encoding": "UTF-8", "text": "##NOTES\n#Collects 20 pubmed citations, given a pubchem id\n\nimport requests\nfrom bs4 import BeautifulSoup\n\ndef main():\n url = \"https://www.ncbi.nlm.nih.gov/pubmed?linkname=pccompound_pubmed_mesh&from_uid=971\"\n page = requests.get(url)\n soup = BeautifulSoup(page.text, 'html.parser')\n\n #find all div classes of \"rprtnum nohighlight\"\n id_wrapper = soup.find_all(class_=\"rprtnum nohighlight\")\n #find all PubMed ids of papers associated with a specific compound\n\n pubmed_ids = []\n for id in id_wrapper:\n pubmed_ids.append(id.input[\"value\"])\n\n print(pubmed_ids)\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.6247464418411255, "alphanum_fraction": 0.6270646452903748, "avg_line_length": 38.21590805053711, "blob_id": "beabeb69b98e9ac93a508f2e62fa506077311f60", "content_id": "d56757944445acac72631da304b60709ef3687ab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3451, "license_type": "no_license", "max_line_length": 125, "num_lines": 88, "path": "/get_drug_ids.py", "repo_name": "jfmalloy1/CitationNetworks", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport pubchempy as pcp\nimport os\nimport re\nimport pandas as pd\n\n#Return the cas number and the name of opioids\ndef get_info(path):\n df = pd.read_csv(path)\n return list(df[\"cas_number\"]), list(df[\"name\"])\n\n#Returns the pubchem ID of a compound (based on name only)\n#Only returns the first cpd_id taken from Pubchem search\ndef get_pubchem_id(name):\n print(name)\n try:\n cpd_id = pcp.get_cids(name, \"name\")\n return name, cpd_id[0]\n except:\n return name, \"\"\n\n#One-time use: remove all non-opioid results\ndef remove_non_opioids():\n #read in list of pubchem ids\n df = pd.read_csv(\"drugs/opioid_pubchem_ids.csv\", dtype={\"Name\": \"str\", \"ID\":\"str\"})\n print(len(df))\n\n drug_files = os.listdir(\"drugs\")\n\n #find all ids of compounds in Veronica's list\n old_ids = []\n for f in drug_files:\n old_ids.append(str(re.sub(\".csv\", \"\", f)))\n\n # #How many of these compounds are present in the opioid list, but not Veronica's?\n # count = 0\n # found_names = []\n # for item, row in df.iterrows():\n # if row[\"ID\"] in old_ids:\n # found_names.append(row[\"Name\"])\n #\n # print(count)\n # print(found_names)\n # print(len(found_names))\n # print(\"Difference between OG and found list:\", list(set(df[\"Name\"]) - set(found_names)))\n # #ANSWER: 4 of them (opium doesn't have a CID, everything else does and has been manually added)\n\n #Now remove all non-opioids from file\n count = 0\n for oid in old_ids:\n if oid not in df[\"ID\"].values:\n try:\n os.remove(\"drugs/\" + oid+ \".csv\")\n except:\n continue\n\ndef main():\n # #Opioid names - from Drugbank\n # opioids = [\"Tramadol\", \"Morphine\", \"Hydromorphone\", \"Methadone\", \"Meperidine\", \"Oxycodone\", \"Butorphanol\"]\n # opioids += [\"Dextropropoxyphene\", \"Pentazocine\", \"Fentanyl\", \"Nalbuphine\", \"Buprenorphine\", \"Dezocine\"]\n # opioids += [\"Dextromoramide\", \"Dihydrocodeine\", \"Ketobemidone\", \"Piritramide\", \"Meptazinol\", \"Phenazocine\"]\n # opioids += 
[\"Tilidine\", \"Codeine\", \"Sufentanil\", \"Alfentanil\", \"Levorphanol\", \"Remifentanil\", \"Hydrocodone\"]\n # opioids += [\"Diphenoxylate\", \"Oxymorphone\", \"Levacetylmethadol\", \"Methadyl acetate\", \"Dihydroetorphine\", \"Diamorphine\"]\n # opioids += [\"Ethylmorphine\", \"Etorphine\", \"Carfentanil\", \"Alphacetylmethadol\", \"Dihydromorphine\", \"DPDPE\"]\n # opioids += [\"Lofentanil\", \"Opium\", \"Normethadone\", \"Alphaprodine\", \"Phenoperidine\", \"Bezitramide\", \"Tapentadol\"]\n # opioids += [\"Nicomorphine\", \"Naltrexone\", \"Eluxadoline\", \"Carfentanil, C-11\", \"Desomorphine\", \"Benzhydrocodone\"]\n # #Read in drugbank csv, return cas_ids\n # cas_ids, names = get_info(\"drugs/opioids_filtered.csv\")\n #\n # #any opioids not in Veronica's list?\n # missing_opioids = list(set(opioids) - set(names))\n # #No - all opioids are present in Veronica's list. Just use the list above.\n #\n # #Find all pubchem ids of opioids\n # opiods_pubchem_ids = []\n # for name in opioids:\n # opiods_pubchem_ids.append(get_pubchem_id(name))\n #\n # #Print all ids to a file\n # print(\"Name,ID\", file=open(\"drugs/opioid_pubchem_ids.csv\", \"w\"))\n # for id in opiods_pubchem_ids:\n # print(str(id[0]) + \",\" + str(id[1]), file=open(\"drugs/opioid_pubchem_ids.csv\", \"a\"))\n\n #ONE TIME USE - remove all non-opioids from directory\n remove_non_opioids()\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.6329032182693481, "alphanum_fraction": 0.644516110420227, "avg_line_length": 29.39215660095215, "blob_id": "54da777cbc96a97b0da14a28300c34fd55d982e1", "content_id": "b3ecc388a104299dd0f7fc0406e973548caba11c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1550, "license_type": "no_license", "max_line_length": 125, "num_lines": 51, "path": "/crossref_scrape.py", "repo_name": "jfmalloy1/CitationNetworks", "src_encoding": "UTF-8", "text": "from selenium import webdriver\nfrom selenium.webdriver.support.ui import Select\nimport os\nimport re\nfrom webdriver_manager.chrome import ChromeDriverManager\nimport json\n\n\"\"\" Get list of filenames associated with PubMed opioid references \"\"\"\n\"\"\" Input: filepath to list of opioid csv files \"\"\"\n\"\"\" Output: list of opioid metadata filenames \"\"\"\ndef get_opioid_filenames(fp):\n files = os.listdir(fp)\n filenames = []\n for f in files:\n if (re.match(\"^[0-9]\", f)):\n filenames.append(f)\n\n return filenames\n\n\"\"\" Test crossref API scraping on a random DOI \"\"\"\ndef test_crossref(doi):\n url = \"https://api.crossref.org/works/\" + doi\n print(url)\n\n try:\n driver = webdriver.Chrome(ChromeDriverManager().install())\n driver.get(url)\n #print(doi, \"succeeded\")\n source = driver.page_source\n source = re.sub(\"<html><head></head><body><pre style=\\\"word-wrap: break-word; white-space: pre-wrap;\\\">\", \"\", source)\n source = re.sub(\"</pre></body></html>\", \"\", source)\n metadata = json.loads(source)\n print(metadata)\n #TODO: \"is-referenced-by-count\" is citation count (within Crossref)\n except:\n print(doi, \"failed\")\n\n driver.quit()\n\n\"\"\" Goal - download relevant metadate from crossref, given DOI \"\"\"\ndef main():\n #Get all csv files associated with opioid drugs\n opioid_files = get_opioid_filenames(\"drugs/\")\n print(opioid_files)\n\n #TEST CROSSREF SCRAPE\n test_crossref(\"10.1016/j.forsciint.2019.110137\")\n\n\nif __name__ == \"__main__\":\n main()\n" } ]
7
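A note on the crossref_scrape.py file in the record above: it drives a full Chrome session via Selenium only to read JSON from api.crossref.org. A minimal sketch of the same lookup with a plain HTTP client — the endpoint, the `is-referenced-by-count` field, and the test DOI all come from the repo's own code, while the helper name is hypothetical:

```python
import requests

def fetch_citation_count(doi):
    # Crossref serves JSON directly, so no browser automation is needed.
    resp = requests.get("https://api.crossref.org/works/" + doi, timeout=30)
    resp.raise_for_status()
    metadata = resp.json()["message"]
    # "is-referenced-by-count" is the citation count the repo's TODO points at.
    return metadata.get("is-referenced-by-count")

if __name__ == "__main__":
    print(fetch_citation_count("10.1016/j.forsciint.2019.110137"))
```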
vendi12/JobTweetsClassifier
https://github.com/vendi12/JobTweetsClassifier
ce6ef611c737f0c570e20843d537abe2caabd2ed
ed9c8b5d86b93b9203b1d76e9184e7883fd9e128
0084bce595bce345c4c01449a89b2a45b392bc4a
refs/heads/master
2021-01-12T02:48:18.452315
2017-08-30T11:25:20
2017-08-30T11:25:20
78,105,819
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.46666666865348816, "alphanum_fraction": 0.692307710647583, "avg_line_length": 15.25, "blob_id": "6cb602de0536155b0bea9201cff2ffaccedf6c08", "content_id": "8a62732276bc2aff17746f3573a3b1c3c33928d7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 195, "license_type": "no_license", "max_line_length": 24, "num_lines": 12, "path": "/requirements.txt", "repo_name": "vendi12/JobTweetsClassifier", "src_encoding": "UTF-8", "text": "certifi==2017.7.27.1\nchardet==3.0.4\nidna==2.5\nnumpy==1.13.1\noauthlib==2.0.2\nrequests==2.18.3\nrequests-oauthlib==0.8.0\nscikit-learn==0.18.2\nscipy==0.19.1\nsklearn==0.0\ntwython==3.5.0\nurllib3==1.22\n" }, { "alpha_fraction": 0.740055501461029, "alphanum_fraction": 0.7687326669692993, "avg_line_length": 37.60714340209961, "blob_id": "e27273b143759208181d3bfb44ac2ea55c699e44", "content_id": "1e5a9d711172b5efebee064d73414c3a8a1b6f46", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1081, "license_type": "no_license", "max_line_length": 144, "num_lines": 28, "path": "/README.md", "repo_name": "vendi12/JobTweetsClassifier", "src_encoding": "UTF-8", "text": "# Feel the Buzz - a JobTweetsClassifier\n\nGoal: text classifier for tweets containing job offers\n\n## Roadmap\n\n1. Load dump of tweets (Settings -> Content -> Your Twitter archive -> Request your archive)\n2. Manually mark tweets with a job posting as positive samples (True)\n3. Sample and format the dataset as a CSV: one tweet per line with balanced classes and shuffled samples\n\n4. Train a classifier to detect job tweets, e.g. fastText, or support vector machine with RBF kernel trained on uni-grams or random forest.\n\n5. Explain the classifier predictions with LIME\n\n\n## Requirements\n\n* pandas\n* [fasttext](https://github.com/salestock/fastText.py)\n* [LIME](https://github.com/marcotcr/lime)\n\n\n## References\n\n1. [Crepe](https://github.com/zhangxiangxiao/Crepe)\n2. [fastText](https://github.com/facebookresearch/fastText)\n3. [fastText tutorial](https://github.com/facebookresearch/fastText/blob/master/tutorials/supervised-learning.md)\n4. [Lime](https://marcotcr.github.io/lime/tutorials/Lime%20-%20basic%20usage%2C%20two%20class%20case.html)(https://arxiv.org/pdf/1602.04938.pdf)\n" }, { "alpha_fraction": 0.6009008884429932, "alphanum_fraction": 0.6067567467689514, "avg_line_length": 29, "blob_id": "2afe8c86ae737c2eb4eac8d22af96dda2282e9ef", "content_id": "e8935e88605729717f09109dcb5b330d9d9b3e2e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2220, "license_type": "no_license", "max_line_length": 127, "num_lines": 74, "path": "/detect_jobs_tweepy.py", "repo_name": "vendi12/JobTweetsClassifier", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n'''\nCreated on Aug 30, 2017\n.. 
codeauthor: svitlana vakulenko\n<svitlana.vakulenko@gmail.com>\n\nClassifies tweets in real time\n'''\nfrom tweepy.streaming import StreamListener\nfrom tweepy import Stream, API, OAuthHandler\n\nfrom sklearn.externals import joblib\n\n\nfrom twitter_settings import *\n\n\nclass TopicListener(StreamListener):\n '''\n Overrides Tweepy class for Twitter Streaming API\n '''\n\n def __init__(self, model_path, vectorizer_path='vectorizer.pkl'):\n # load classifier\n self.clf = joblib.load(model_path)\n # tweet representation as tfidf\n self.vectorizer = joblib.load(vectorizer_path)\n # set up Twitter connection\n self.auth_handler = OAuthHandler(APP_KEY, APP_SECRET)\n self.auth_handler.set_access_token(OAUTH_TOKEN, OAUTH_TOKEN_SECRET)\n self.api = API(self.auth_handler)\n\n def on_status(self, status):\n # ignore retweets\n if not hasattr(status,'retweeted_status') and status.in_reply_to_status_id == None:\n # preprocess\n tweet_text = status.text.encode('utf-8').replace('\\n', '')\n # print (tweet_text)\n # classify\n tweet_vector = self.vectorizer.transform([tweet_text])\n job_tweet_prediction = self.clf.predict_proba(tweet_vector)[0,1]\n if job_tweet_prediction > 0.73:\n print tweet_text\n print job_tweet_prediction\n # retweet\n self.api.update_status(status='https://twitter.com/%s/status/%s' % (tweet['user']['screen_name'], tweet['id']))\n\n def on_error(self, status_code):\n print (status_code, 'error code')\n\n\ndef detect_jobs(model_path='random_forest.pkl'):\n '''\n Connect to Twitter API and fetch relevant tweets from the stream\n '''\n listener = TopicListener(model_path)\n\n # start streaming\n while True:\n try:\n stream = Stream(listener.auth_handler, listener)\n print ('Listening...')\n stream.sample(languages=['en'])\n # stream.sample()\n except Exception as e:\n # reconnect on exceptions\n print (e)\n continue\n\n\nif __name__ == '__main__':\n detect_jobs()\n" }, { "alpha_fraction": 0.6491507291793823, "alphanum_fraction": 0.656050980091095, "avg_line_length": 27.119403839111328, "blob_id": "e732312c82476c44e9010cc53fbb8966bd2da898", "content_id": "a51dbfe78775b3a3c40b4db66cfa213672972f77", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1884, "license_type": "no_license", "max_line_length": 103, "num_lines": 67, "path": "/stream.py", "repo_name": "vendi12/JobTweetsClassifier", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n'''\nCreated on Jun 27, 2017\n.. 
codeauthor: svitlana vakulenko\n<svitlana.vakulenko@gmail.com>\n\nCollects tweets harvesting the official Twitter API\n'''\n\nfrom collections import deque\n\nfrom threading import Thread\nfrom requests.exceptions import ChunkedEncodingError\n\nfrom twython import TwythonStreamer\nfrom twitter_settings import *\n\n\nclass TwitterStream(TwythonStreamer):\n\n def __init__(self, consumer_key, consumer_secret, token, token_secret, tqueue):\n self.tweet_queue = tqueue\n super(TwitterStream, self).__init__(consumer_key, consumer_secret, token, token_secret)\n\n def on_success(self, data):\n if 'text' in data:\n self.tweet_queue.append(data)\n\n def on_error(self, status_code, data):\n print status_code\n\n\ndef stream_tweets(tweets_queue):\n try:\n stream = TwitterStream(APP_KEY, APP_SECRET,\n OAUTH_TOKEN, OAUTH_TOKEN_SECRET, tweets_queue)\n # You can filter on keywords, or simply draw from the sample stream\n # stream.statuses.filter(track='twitter', language='en')\n stream.statuses.sample(language='en')\n except ChunkedEncodingError:\n # Sometimes the API sends back one byte less than expected which results in an exception in the\n # current version of the requests library\n stream_tweets(tweets_queue)\n\n\ndef process_tweets(tweets_queue, limit):\n # save tweet_texts\n documents = []\n while True:\n if len(tweets_queue) > 0:\n tweet = tweets_queue.popleft()\n tweet_text = tweet['text'].encode('utf-8').replace('\\n', '')\n print tweet_text\n\n\ndef test_stream_tweets():\n tweet_queue = deque()\n tweet_stream = Thread(target=stream_tweets, args=(tweet_queue,))\n tweet_stream.start()\n\n process_tweets(tweet_queue, limit=1000)\n\n\nif __name__ == \"__main__\":\n test_stream_tweets()\n" }, { "alpha_fraction": 0.6464767456054688, "alphanum_fraction": 0.6563718318939209, "avg_line_length": 25.0625, "blob_id": "4b13bf0a36b8f13e98a9d0cedf24b051330415e6", "content_id": "941a18f59bd25c312ec18bf89aa5b458c0cb40a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3337, "license_type": "no_license", "max_line_length": 88, "num_lines": 128, "path": "/classify.py", "repo_name": "vendi12/JobTweetsClassifier", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n'''\nCreated on Jan 5, 2017\n.. codeauthor: svitlana vakulenko\n<svitlana.vakulenko@gmail.com>\n\nIdentifies the tweets containing a job ad\n'''\n\n# Load text processing libraries\nimport pandas as pd\n\nimport fasttext\n\n\nDATASET_PATH = 'data/all_tweets.csv'\nDATASET_PATH_FT_FORMAT = 'data/all_tweets_ft.csv'\nTEST_DATASET_PATH = 'data/test_tweets_ft.csv'\n\n\n# 1. 
Load the labeled dataset into a dataframe\ndef load_dataset(dataset_path):\n return pd.read_csv(dataset_path, sep='\\t',header=None)\n\n\ndef test_load_dataset():\n dataset_path = 'data/my_tweets.csv'\n df = load_dataset(dataset_path)\n assert df.shape\n print \"Loaded table\", df.shape\n print df[0]\n\n\ndef prepare_dataset(dataset_path, output_path):\n '''\n dataset formatting for fastText classification\n + remove paragraphs with line separators: save one tweet per line\n + balance classes\n + shuffle samples\n '''\n df = load_dataset(dataset_path)\n \n # preprocessing:\n # strip new line chars\n df[1] = df[1].str.replace('\\n', '')\n # lowercase\n df[1] = df[1].str.lower()\n # df[0] = str(df[0]).lower()\n df[0] = '__label__' + df[0].astype(str)\n\n print \"Loaded table\", df.shape\n # with open(output_path, 'w') as f_out:\n # for row in df.rows():\n # f_out.write(row)\n # show class distribution\n # print df.groupby(1).count()\n false_samples = df.loc[df[0]=='__label__False']\n true_samples = df.loc[df[0]=='__label__True']\n print len(false_samples), len(true_samples)\n # downsample false classes\n n_samples_per_class = min(len(false_samples), len(true_samples))\n false_samples = false_samples.sample(n=n_samples_per_class)\n true_samples = true_samples.sample(n=n_samples_per_class)\n print len(false_samples), len(true_samples)\n dataset = true_samples.append(false_samples, ignore_index=True)\n # shuffle\n dataset = dataset.sample(frac=1).reset_index(drop=True)\n print len(dataset)\n dataset.to_csv(output_path, sep=',', index=False, header=False)\n\n\ndef test_prepare_dataset(dataset_path=DATASET_PATH, output_path=DATASET_PATH_FT_FORMAT):\n prepare_dataset(dataset_path, output_path)\n\n\ndef train_fasttext(dataset_path=DATASET_PATH_FT_FORMAT):\n # training\n # ./fasttext supervised -input data.train.txt -output model\n classifier = fasttext.supervised(dataset_path, 'model', epoch=50)\n classifier.word_ngrams = 5\n evaluate_classifier(classifier)\n\n\ndef evaluate_classifier(classifier):\n # evaluation\n # ./fasttext test model.bin test.txt\n result = classifier.test(TEST_DATASET_PATH)\n print 'P@1:', result.precision\n print 'R@1:', result.recall\n print 'Number of examples:', result.nexamples\n\n\ndef explain_classifier():\n # load model\n classifier = fasttext.load_model('model.bin', encoding='utf-8')\n # evaluation\n evaluate_classifier(classifier)\n\n\n# def preprocess(tweets):\n# '''\n# Procedure to preprocess a list of tweet texts\n \n# tweets - list of strings\n\n# '''\n\n\n# def test_preprocess():\n# tweets = [\"\"\"RT @fchollet: How I get my ML news: \n\n# 1) Twitter \n# 2) arxiv\n# 3) mailing lists \n# .\n# .\n# .\n# 97) overheard at ramen place\n# 98) graffiti in bathroom st…\"\"\"]\n# preprocess(tweets)\n\n\nif __name__ == '__main__':\n # test_prepare_dataset()\n # train_fasttext()\n explain_classifier()" }, { "alpha_fraction": 0.572898805141449, "alphanum_fraction": 0.5801886916160583, "avg_line_length": 30.513513565063477, "blob_id": "1d2994745662dc0f296254b9f9027a6374346f48", "content_id": "2e42ff61edf74dc61dbf65f9d438991ced98717c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2332, "license_type": "no_license", "max_line_length": 139, "num_lines": 74, "path": "/detect_jobs.py", "repo_name": "vendi12/JobTweetsClassifier", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n'''\nCreated on Aug 9, 2017\n.. 
codeauthor: svitlana vakulenko\n<svitlana.vakulenko@gmail.com>\n\nClassifies tweets in real time\n'''\nfrom collections import deque\nfrom threading import Thread\n\nfrom sklearn.externals import joblib\n\nfrom twython import Twython\n\nfrom twitter_settings import *\nfrom stream import stream_tweets\n\n\nclass JobTweetsClassifier():\n\n def __init__(self, model_path, vectorizer_path='vectorizer.pkl'):\n # load classifier\n self.clf = joblib.load(model_path)\n # tweet representation as tfidf\n self.vectorizer = joblib.load(vectorizer_path)\n # connect to Twitter\n # twitter = Twython(APP_KEY, APP_SECRET)\n self.twitter = Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)\n\n def launch(self):\n # start streaming tweets\n tweet_queue = deque()\n tweet_stream = Thread(target=stream_tweets, args=(tweet_queue,))\n tweet_stream.start()\n self.detect_jobs(tweet_queue, limit=1000)\n\n def detect_jobs(self, tweets_queue, limit):\n # save tweet_texts\n documents = []\n while True:\n if len(tweets_queue) > 0:\n tweet = tweets_queue.popleft()\n\n # ignore retweets\n if not 'retweeted_status' in tweet.keys() and tweet['in_reply_to_status_id'] == None:\n tweet_text = tweet['text'].encode('utf-8').replace('\\n', '')\n # print (tweet_text)\n tweet_vector = self.vectorizer.transform([tweet_text])\n job_tweet_prediction = self.clf.predict_proba(tweet_vector)[0,1]\n if job_tweet_prediction > 0.73:\n print tweet_text\n print job_tweet_prediction\n # retweet\n self.twitter.update_status(status='https://twitter.com/%s/status/%s' % (tweet['user']['screen_name'], tweet['id']))\n\n\ndef test_detect_jobs(model_path='random_forest.pkl'):\n jobs_monitor = JobTweetsClassifier(model_path)\n # start streaming\n while True:\n try:\n print ('Listening...')\n jobs_monitor.launch()\n except Exception as e:\n # reconnect on exceptions\n print (e)\n continue\n\n\nif __name__ == \"__main__\":\n test_detect_jobs()\n" } ]
6
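The two streaming scripts in the record above (detect_jobs.py and detect_jobs_tweepy.py) share one scoring step: load the pickled vectorizer and random forest, transform the tweet text, and threshold predict_proba at 0.73. A sketch of that step in isolation, assuming the repo's random_forest.pkl and vectorizer.pkl artifacts are on disk; note the repo pins scikit-learn 0.18, where joblib still lives under sklearn.externals:

```python
from sklearn.externals import joblib  # repo-era import; use plain `import joblib` on current scikit-learn

clf = joblib.load("random_forest.pkl")
vectorizer = joblib.load("vectorizer.pkl")

def is_job_tweet(text, threshold=0.73):
    # Same pipeline as the stream listeners: tf-idf vector -> P(class 1) -> threshold.
    vec = vectorizer.transform([text])
    return clf.predict_proba(vec)[0, 1] > threshold

print(is_job_tweet("We are hiring a Python developer, apply now!"))
```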
SaneStreet/FirstFlaskApp
https://github.com/SaneStreet/FirstFlaskApp
4e72ebbf65adb722790ba0f33f967e8f4581ed77
2005a3801ff08b4d68160ed2049d0b43a1f34efa
55d3dd2ea9d6f18e9a082be3abb877799012960c
refs/heads/master
2020-06-02T02:04:26.273905
2019-06-09T12:09:00
2019-06-09T12:09:00
191,001,231
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.568965494632721, "alphanum_fraction": 0.5977011322975159, "avg_line_length": 16.399999618530273, "blob_id": "bf605de23865785e3fb145ae2406e78d31b7b168", "content_id": "a8b9ac5d257a7cc2880978b0028e651657d9acc7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 174, "license_type": "no_license", "max_line_length": 39, "num_lines": 10, "path": "/File2.py", "repo_name": "SaneStreet/FirstFlaskApp", "src_encoding": "UTF-8", "text": "# File2.py\n\nimport File1\n\nprint (\"File2 __name__ = %s\" %__name__)\n\nif __name__ == \"__main__\":\n\tprint (\"File2 is being run directly\")\nelse:\n\tprint (\"File2 is being imported\")\n" }, { "alpha_fraction": 0.6737108826637268, "alphanum_fraction": 0.6745561957359314, "avg_line_length": 27.878047943115234, "blob_id": "ee5c898da1858dada1da676f9da1bd6c00d9b714", "content_id": "1b8a232a42eb25dec3653734fad0813cf679930d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1183, "license_type": "no_license", "max_line_length": 83, "num_lines": 41, "path": "/main.py", "repo_name": "SaneStreet/FirstFlaskApp", "src_encoding": "UTF-8", "text": "from flask import Flask, request, render_template\n\n# Hjaelper builder til at finde routes\napp = Flask(__name__)\n\n# This routes different pages in your App using python functions\n# @ signifies a decorator - a way to wrap a function and modifying its behavior\n@app.route(\"/\") # root dir\ndef hello_flask():\n return \"Hello Flask!\"\n\n@app.route(\"/<user>\")\ndef index(user=None):\n return render_template(\"user.html\", user=user) # hver side skal have en return\n\n# Tuna page\n@app.route('/bacon', methods=['GET', 'POST'])\ndef bacon():\n if request.method == 'POST':\n return \"You are using POST\"\n else:\n return \"You are using GET\"\n\n# Profile pages with custom names\n@app.route('/profile/<username>')\ndef profile(username):\n return render_template(\"profile.html\", username=username)\n\n# use Integer with pages in url\n@app.route('/post/<int:post_id>')\ndef post(post_id):\n return \"<h2>Post ID is %s\" % post_id\n\n@app.route(\"/shopping\")\ndef shopping():\n food = [\"Cheese\", \"Tuna\", \"Beef\"]\n return render_template(\"shopping.html\", food=food)\n\n# checks if we only run the app/server when this python file is called\nif __name__ == \"__main__\":\n app.run(debug=True)" } ]
2
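The main.py in the record above is easiest to verify without starting a server by using Flask's built-in test client. A short sketch — the paths and response strings are the ones defined in main.py itself, and only routes that don't require the missing template files are exercised:

```python
from main import app

with app.test_client() as client:
    assert client.get("/").data == b"Hello Flask!"                # root route
    assert client.get("/bacon").data == b"You are using GET"      # method branch
    assert client.post("/bacon").data == b"You are using POST"
    assert b"Post ID is 42" in client.get("/post/42").data        # int converter
```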
Bindernews/LazorsSolver
https://github.com/Bindernews/LazorsSolver
b1ff5fbca22d9e4bcdddd2594ffaee02f1dccb4a
cc6c0cfae48ed1b2d3429b9b2a49de21a5577c86
9f6998033618e898f021a066dbb7e00c38cfd1c7
refs/heads/master
2021-09-05T04:15:38.034328
2018-01-24T04:45:40
2018-01-24T04:45:40
118,576,516
3
0
null
null
null
null
null
[ { "alpha_fraction": 0.761273205280304, "alphanum_fraction": 0.7665782570838928, "avg_line_length": 50.45454406738281, "blob_id": "a92027973b0b84d7d7cd1bd95d495ffe6eb08838", "content_id": "f8c76ab12365d4158acc14b1b87f525389644d45", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1131, "license_type": "no_license", "max_line_length": 122, "num_lines": 22, "path": "/README.md", "repo_name": "Bindernews/LazorsSolver", "src_encoding": "UTF-8", "text": "Lazor Solver\n============\n\nThis is a solver for [the Lazors game](https://play.google.com/store/apps/details?id=net.pyrosphere.lazors).\nIt takes a screenshot of the game and attempts to deduce and solve the puzzle from just that screenshot.\n\n## Installing\nThe program should work on Python 3.5, but has only been tested on 3.6.\nUse `pip install -r requirements.txt` (preferably in a [virtual environment](https://docs.python.org/3/library/venv.html))\nto install the required libraries.\n\n## Running\nSimply execute `python solve.py`. To change the file being processed, edit the `cv2.imread(<filename>)` line in\nthe `main()` function.\n\nNote that the program will generate several `logXX.png` files in the directory where it's run. This is for debugging\npurposes as the program is still a work in progress.\n\n## History\nI became bored with solving the puzzles on my own, so I decided to write a solver because I thought it would be fun.\nThen I realized I didn't want to have to manually enter the game state, so I began tinkering with image processing\nand computer vision in order to determine the state of the game given a screenshot." }, { "alpha_fraction": 0.6073601245880127, "alphanum_fraction": 0.6260314583778381, "avg_line_length": 38.338863372802734, "blob_id": "5a48c2a57e13fd6aa38f52aab6a8085529510e2c", "content_id": "cfc4c9ac9bb452d1f8646063f19b5154475ddb04", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16603, "license_type": "no_license", "max_line_length": 134, "num_lines": 422, "path": "/solve.py", "repo_name": "Bindernews/LazorsSolver", "src_encoding": "UTF-8", "text": "from collections import namedtuple\n\nimport cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom matplotlib import gridspec\nimport skimage.transform\n\nCOLOR_RED = (0,0,255)\nDEBUG = True\n# Normallized grid size (square)\nNORM_GRID_SIZE = 100\n\nPuzzleGrid = namedtuple('PuzzleGrid', [\n 'top', # Top of the cropped puzzle area (contains the entire puzzle, not just the grid)\n 'bottom', # Bottom of the cropped puzzle area, contains the entire puzzle\n 'rows', # Array of Y-values indicating the top of each row, excludes the bottom of the last row\n 'cols', # Array of X-values indicating the left of each column, excludes the right side of the last col\n 'all_rows', # rows + last row\n 'all_cols', # cols + last col\n 'half_rows', # rows + half-rows, for targets and lasers\n 'half_cols', # cols + half-cols, for targets and lasers\n 'size' # Size of the square box size\n ]\n)\n\ndef imread_gray(fname):\n img = cv2.imread(fname, cv2.IMREAD_GRAYSCALE)\n if img is None:\n raise ValueError('File ' + fname + ' not found')\n return img\n\nclass Templates:\n _TILE_DATA = {\n 'block': 'template_white.png',\n 'portal': 'template_portal.png',\n }\n _LASER_FILE = 'template_laser.png'\n \n def __init__(self, folder='template/'):\n self.folder = folder\n self.TILES = {}\n for k,v in Templates._TILE_DATA.items():\n self.TILES[k] = imread_gray(folder + 
v)\n self.LASER = cv2.imread(folder + Templates._LASER_FILE)\n if self.LASER is None:\n raise ValueError('Laser template not found')\n\n# Holds template information\nTEMPLATES = Templates()\n\ndef filter_range(img, min, max, vset=None, invert=False):\n st = vset or max\n ret, dst = cv2.threshold(img, max, 255, cv2.THRESH_TOZERO_INV)\n ret, dst = cv2.threshold(dst, min, st, cv2.THRESH_BINARY, dst)\n if invert:\n dst = np.bitwise_xor(dst, st, out=dst)\n return dst\n\nglobal _imlog_count\n_imlog_count = 0\ndef imlog(img, name=None):\n \"\"\"\n Convenience function to save the given image to a .png.\n \"\"\"\n global _imlog_count\n name = ('-' + name) if name else ''\n fname = 'log{:02}{:}.png'.format(_imlog_count, name)\n _imlog_count += 1\n cv2.imwrite(fname, img)\n\ndef calc_threshold_ranges(data):\n \"\"\"\n Poorly-named function which calculates the ranges of \"on\" pixels, as well as the inverse.\n \"\"\"\n # Have numpy determine where the values change (T to F, F to T)\n diffs = np.diff(data)\n # toone = beginning of set of values, tozero = end of set of values\n toone = np.where(diffs == 1)[0]\n tozero = np.where(diffs == -1)[0]\n ranges = tozero - toone\n inv_ranges = toone[1:] - tozero[:-1]\n return diffs, toone, tozero, ranges, inv_ranges\n \ndef fill_in_small_empty_areas(data, toone, tozero, inv_ranges, threshold):\n \"\"\"\n Fill in inv_ranges smaller than threshold by modifying data in-place.\n Specifically works with inv_ranges, not ranges.\n \"\"\"\n for i in range(len(inv_ranges)):\n if inv_ranges[i] <= threshold:\n data[tozero[i]:toone[i + 1] + 1] = 1\n \ndef find_center_area(gray):\n \"\"\"\n Find the center area of the puzzle.\n \"\"\"\n # Blur and filter_range to make the image agreeable to the processing we want to perform\n height, width = gray.shape\n cur = gray\n cur = cv2.medianBlur(cur, 21)\n cur = filter_range(cur, 50, 104, invert=True)\n imlog(cur)\n # Make a copy of the image before we modify it more\n blocked = cur.copy()\n # Set all positive values to 1\n cur[cur > 0] = 1\n # Sum each row to determine the largest most continuous set of rows\n rows = np.sum(cur, 1)\n # Set the first and last rows to 0 so we have a consistent pattern 0 to 1 to 0 to 1, etc.\n rows[0] = 0\n rows[-1] = 0\n # Filter out small values and convert to set of 1s and 0s\n rows[rows <= int(width * 0.10)] = 0\n rows[rows > 0] = 1\n rows = np.int8(rows)\n # ranges = sizes of \"on\" sections, inv_ranges = sizes of \"off\" sections\n diffs, toone, tozero, ranges, inv_ranges = calc_threshold_ranges(rows)\n # Remove any \"small\" empty areas by filling them in\n fill_in_small_empty_areas(rows, toone, tozero, inv_ranges, int(height * 0.05))\n # Recalculate the values\n diffs, toone, tozero, ranges, inv_ranges = calc_threshold_ranges(rows)\n # Find the largest range\n max_range = np.argmax(ranges)\n # Determine the top and bottom\n if max_range > 0:\n top = toone[max_range] - int(inv_ranges[max_range - 1] * 0.5)\n else:\n top = 0\n if max_range < len(ranges) - 1:\n bottom = tozero[max_range] + int(inv_ranges[max_range] * 0.5)\n else:\n bottom = height\n blocked = blocked[top:bottom]\n return top, bottom, blocked\n \n\ndef combine_close_values(values, counts=None, epsilon=1, dtype=np.int_, selector=np.median):\n if counts is None:\n counts = np.ones(len(values), dtype=np.int32)\n diffs = np.diff(values)\n out_values = [[values[0]]] # Array of array of values, will get median'd out and flattened\n out_counts = [counts[0]]\n # First combine close values and counts\n for i in range(len(diffs)):\n 
next_value = values[i + 1]\n next_count = counts[i + 1]\n if diffs[i] <= epsilon:\n out_values[-1].append(next_value)\n out_counts[-1] += next_count\n else:\n out_values.append([next_value])\n out_counts.append(next_count)\n # Now select the value from the list of values for this combined value\n for i in range(len(out_values)):\n out_values[i] = selector(out_values[i])\n return np.array(out_values, dtype=dtype), np.array(out_counts, dtype=np.int32)\n\ndef histogram_axis(img, axis, threshold=0.2):\n hist = np.sum(img, axis)\n # Fitler small values\n hist[hist <= threshold * hist.max()] = 0\n # Get list of unfiltered values\n selected = np.where(hist > 0)\n return selected[0], hist[selected]\n\ndef edge_detect_rows_cols(img, threshold=0.2):\n # Do edge detection and then build a column histogram\n edges = cv2.Canny(img, 50, 200)\n # imlog(edges)\n edges[edges > 1] = 1\n cols, _ = histogram_axis(edges, 0, threshold)\n rows, _ = histogram_axis(edges, 1, threshold)\n return rows, cols\n\ndef find_square_size(graycrop):\n height, width = graycrop.shape\n # Build row and col \"histogram\"s to determine the most likely row and column locations\n rows, cols = edge_detect_rows_cols(graycrop)\n # Empty squares can create false cols/rows, combine them if possible\n cols, _ = combine_close_values(cols, None, 0.02 * width, selector=np.max)\n rows, _ = combine_close_values(rows, None, 0.02 * height, selector=np.max)\n # Find the differences between suspected column values, this gives us possible square sizes\n combined = np.append(np.diff(cols), np.diff(rows))\n sizes, counts = np.unique(combined, return_counts=True)\n # Combine values which are close together and determine the most popular possible box sizes\n sizes, counts = combine_close_values(sizes, counts, 4, selector=np.max)\n # Now that we have the most popular box sizes, select the largest, most popular size. We use the LARGEST index with the max counts\n box_size = sizes[np.where(counts == counts.max())[0][-1]]\n return rows, cols, box_size\n\ndef nearest_step(start, step, target):\n \"\"\"\n Find nearest `v` to `target` where `v = start + (n * step)`.\n Or, find a value close to :target: which equals :start: plus some number of :step:s.\n \"\"\"\n if start == target:\n return start\n n = round((target - start) / step)\n x = start + (n * step)\n return int(x)\n\ndef calculate_puzzle_grid(gray):\n # Find the center area\n top, bottom, filter_crop = find_center_area(gray)\n gray_crop = gray[top:bottom]\n # Find the expected rows, cols, and box size\n rows, cols, box_size = find_square_size(gray_crop)\n # Edge detect on the thresholded image to get more edges\n more_rows, more_cols = edge_detect_rows_cols(filter_crop)\n # Find the min and max values of possible rows / cols\n col_min = min(cols.min(), more_cols.min())\n col_max = max(cols.max(), more_cols.max())\n row_min = min(rows.min(), more_rows.min())\n row_max = max(rows.max(), more_rows.max())\n # Find estimated number of rows/cols. 
We do want to round, but only round up a little bit.\n row_count = int(((row_max - row_min) / box_size) + 0.2)\n col_count = int(((col_max - col_min) / box_size) + 0.2)\n # Recalc row_min and col_min by using a middle column, as they are more accurate\n row_min = nearest_step(rows[1], box_size, row_min)\n col_min = nearest_step(cols[1], box_size, col_min)\n # Use the corrected row_min and col_min to calculate new row and column arrays\n rows = np.int32([row_min + (box_size * i) for i in range(row_count + 1)])\n cols = np.int32([col_min + (box_size * i) for i in range(col_count + 1)])\n half_rows = np.int32(list(range(rows[0], rows[-1], box_size // 2)))\n half_cols = np.int32(list(range(cols[0], cols[-1], box_size // 2)))\n # Return all relevant data as a PuzzleGrid\n return PuzzleGrid(\n top = top,\n bottom = bottom,\n rows = rows[:-1], cols = cols[:-1],\n all_rows = rows, all_cols = cols,\n half_rows = half_rows, half_cols = half_cols,\n size = box_size)\n\ndef np_divmul(arr, divisor):\n arr = np.floor_divide(arr, divisor, out=arr)\n arr = np.multiply(arr, divisor, out=arr)\n return arr\n\ndef prepare_for_identify_tile(gray):\n blur = gray\n blur = np.uint8(cv2.GaussianBlur(blur, (15,15), 0))\n blur = np_divmul(blur, 8)\n blur = np_divmul(blur, 65)\n return blur\n\ndef normalize_puzzle_grid(grid, img):\n \"\"\"\n Normallize the image to a standard size to ease image processing.\n Provides one tile's worth of border around the grid.\n \"\"\"\n height, width = img.shape[:2]\n orig_top = grid.rows[0] + grid.top - grid.size\n orig_bottom = grid.rows[-1] + grid.top + (grid.size * 2) # last row + border\n orig_left = max(grid.cols[0] - grid.size, 0)\n orig_right = min(grid.cols[-1] + (grid.size * 2), width) # last col + border\n new_width = NORM_GRID_SIZE * (len(grid.rows) + 3) # last col + left border + right border\n new_height = NORM_GRID_SIZE * (len(grid.cols) + 3) # last row + top border + bottom border\n new_rows = list(range(NORM_GRID_SIZE, new_height, NORM_GRID_SIZE))\n new_cols = list(range(NORM_GRID_SIZE, new_width, NORM_GRID_SIZE))\n print(orig_top, orig_bottom, orig_left, orig_right)\n new_img = resize_image(img[orig_top:orig_bottom, orig_left:orig_right], (new_height, new_width))\n new_grid = PuzzleGrid(top=0, bottom=new_height, rows=new_rows,\n cols=new_cols, size=NORM_GRID_SIZE)\n return new_grid, new_img\n\ndef resize_image(src, shape):\n \"\"\" Resize an image correctly. \"\"\"\n return np.array(skimage.transform.resize(src, shape, mode='constant', preserve_range=True), dtype=src.dtype)\n\ndef log_pixel_values(img):\n values, counts = np.unique(img, return_counts=True)\n useful_counts = np.where(counts >= 5)\n values = values[useful_counts]\n counts = counts[useful_counts]\n print(values, counts)\n\ndef identify_tile(tile_orig):\n \"\"\"\n Use a combination of several methods to attempt to identify the type of tile.\n Returns the string name of the tile or None if type is unknown.\n \"\"\"\n # This is the number of pixels required for a tile to be \"fully\" that color. 83%\n PIXEL_COUNT_THRESHOLD = 8300\n tile = resize_image(tile_orig, (NORM_GRID_SIZE, NORM_GRID_SIZE))\n # First we try to match known tile templates. 
These are more complex tiles.\n for k, template in TEMPLATES.TILES.items():\n res = cv2.matchTemplate(tile, template, cv2.TM_CCOEFF_NORMED)\n loc = np.where(res >= 0.8)\n if len(loc[0]) > 0:\n return k\n # Certain tiles are mostly solid color\n values, counts = np.unique(tile, return_counts=True)\n if counts[np.where(values == 65)] >= PIXEL_COUNT_THRESHOLD:\n return 'empty'\n if counts[np.where(values == 0)] >= PIXEL_COUNT_THRESHOLD:\n return 'black'\n # If we have no idea what it is, then display potentially useful pixel values\n log_pixel_values(tile)\n return None\n\ndef test_isolate_boxes(img, gray):\n # Determine where the puzzle is and the grid size\n grid = calculate_puzzle_grid(gray)\n gray_crop = gray[grid.top:grid.bottom]\n\n if DEBUG:\n img_copy = img.copy()[grid.top:grid.bottom]\n img_copy[:, grid.cols] = COLOR_RED\n img_copy[grid.rows, :] = COLOR_RED\n imlog(img_copy)\n \n # Use slightly larger capture size to make sure we get the whole box\n capture_size = int(grid.size * 1.02)\n offset_value = int(grid.size * 0.01)\n\n # Segment the grayscale image for image matching\n filtered = prepare_for_identify_tile(gray_crop)\n if DEBUG:\n filtered_copy = cv2.cvtColor(filtered, cv2.COLOR_GRAY2BGR)\n filtered_copy[:, grid.cols] = COLOR_RED\n filtered_copy[grid.rows, :] = COLOR_RED\n imlog(filtered_copy)\n\n # Now grab each section\n # These two items are for matplotlib and making a nice grid\n gs = gridspec.GridSpec(len(grid.rows), len(grid.cols), right=0.6, hspace=0.5, wspace=0.05)\n gsiter = iter(gs)\n for r in grid.rows:\n y = r - offset_value\n for c in grid.cols:\n x = c - offset_value\n tile = filtered[y:y + capture_size, x:x + capture_size]\n tile_gray = gray_crop[y:y + capture_size, x:x + capture_size]\n tile_type = identify_tile(tile)\n\n if DEBUG:\n # This is to plot the images so we can see what's happening.\n ax = plt.subplot(next(gsiter))\n ax.set_title(tile_type or 'unknown')\n ax.imshow(tile_gray, cmap='gray')\n ax.set_xticks([])\n ax.set_yticks([])\n if DEBUG:\n plt.show()\n\ndef test_find_targets(img, gray):\n grid = calculate_puzzle_grid(gray)\n gray_crop = gray[grid.top:grid.bottom]\n proc = gray_crop\n circles = cv2.HoughCircles(proc, cv2.HOUGH_GRADIENT, 1.3, (grid.size // 4), maxRadius=(grid.size // 3))\n\n if circles is None:\n print('No circles')\n return\n\n circles = np.round(circles[0, :]).astype(\"int\")\n epsilon = grid.size // 4\n for (x, y, r) in circles:\n c_row = np.where(np.abs(grid.half_rows - y) <= epsilon)\n c_col = np.where(np.abs(grid.half_cols - x) <= epsilon)\n print(c_row, c_col)\n\n if DEBUG:\n output = cv2.cvtColor(proc, cv2.COLOR_GRAY2BGR)\n # convert the (x, y) coordinates and radius of the circles to integers\n \n # loop over the (x, y) coordinates and radius of the circles\n for (x, y, r) in circles:\n # draw the circle in the output image, then draw a rectangle\n # corresponding to the center of the circle\n cv2.circle(output, (x, y), r, (0, 255, 0), 4)\n cv2.rectangle(output, (x - 5, y - 5), (x + 5, y + 5), (0, 128, 255), -1)\n imlog(output)\n\ndef test_find_lasers(img, gray):\n grid = calculate_puzzle_grid(gray)\n color_crop = img[grid.top:grid.bottom]\n gray_crop = gray[grid.top:grid.bottom]\n\n # Edge detection higlights the lasers\n edges = cv2.Canny(gray_crop, 50, 200)\n # Now try to find the lines\n linesP = cv2.HoughLinesP(edges, 1, np.pi / 180,\n threshold = 50,\n minLineLength = (grid.size // 8),\n maxLineGap = 0)\n print(linesP)\n\n if DEBUG:\n output = cv2.cvtColor(edges, cv2.COLOR_GRAY2BGR)\n for ln in linesP:\n pt1, pt2 = 
tuple(ln[0][0:2]), tuple(ln[0][2:4])\n cv2.line(output, pt1, pt2, (255,0,255)) \n cv2.imshow('lines', output)\n\n # Lasers are always at a 45-degree angle (at least for us)\n for ln in linesP:\n ln = ln[0]\n pt1, pt2 = ln[0:2], ln[2:4]\n\n \n # hsv = cv2.cvtColor(color_crop, cv2.COLOR_BGR2HSV)\n # hue = hsv[:,:,0]\n # hsv[np.where(hsv[:,:,2] <= 180)] = 0\n # hsv[30 < hue and hue < 220] = 0\n # cv2.imshow('red', cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR))\n # mask = cv2.inRange(hsv, (30, 0, 0), (200, 255, 255))\n # cv2.imshow('mask', mask)\n res = cv2.matchTemplate(color_crop, TEMPLATES.LASER, cv2.TM_CCOEFF_NORMED)\n lasers = np.where(res >= 0.8)\n print(lasers)\n cv2.waitKey(0)\n\ndef main():\n img = cv2.imread('test/test3.png')\n img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n return test_isolate_boxes(img, img_gray)\n\nif __name__ == '__main__':\n main()\n\n\n" } ]
2
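One detail in the solve.py above worth isolating: grid reconstruction hinges on nearest_step(), which snaps a noisy detected coordinate onto the `start + n*step` lattice implied by the detected box size. A self-contained worked example using the function verbatim from the repo; the pixel values are made up for illustration:

```python
def nearest_step(start, step, target):
    # Verbatim from solve.py: find v = start + (n * step) closest to target.
    if start == target:
        return start
    n = round((target - start) / step)
    return int(start + (n * step))

# With a detected box size of 96 px and a trusted column edge at x=120,
# a noisy edge detected at x=407 snaps to the third grid line: 120 + 3*96.
assert nearest_step(120, 96, 407) == 408
```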
evirma/nginxpla
https://github.com/evirma/nginxpla
934f82eac621a830e2048e5fab1da0f2fc0f41c1
24ff15809cb3c6f81821c09d07e52c629fca491a
4d8bc6caf2e31a92c30904917b032773f54d4965
refs/heads/master
2023-06-19T11:18:46.257419
2021-07-15T13:22:58
2021-07-15T13:22:58
381,649,224
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5904150605201721, "alphanum_fraction": 0.6089003086090088, "avg_line_length": 36.941558837890625, "blob_id": "9caa6ee0ad0d5eb668e0d6a5ef01a3aa4bb30ff5", "content_id": "b624218fa0cf7a67bcab9af1cbfb4bc6c96b9c92", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 11687, "license_type": "permissive", "max_line_length": 245, "num_lines": 308, "path": "/README.rst", "repo_name": "evirma/nginxpla", "src_encoding": "UTF-8", "text": "***********************************************************\n``nginxpla`` utility for nginx access log real-time metrics\n***********************************************************\n\nInspired by `ngxtop <https://github.com/lebinh/ngxtop>`_\n\n``nginxpla`` is console nginx's log parser and analyser written in python. Fully configurable reports and templates.\nLike ``ngxtop`` it allows build ``top``-like custom reports by chosen metrics.\nI have tried my best to do it customizable and extendable. \n\n ``nginxpla`` is very powefull for troubleshooting and monitoring your Nginx server here and now. \n It is not suitable for long-term monitoring, because under the hood it has sqlite3. \n Performance can degrade when large amounts of data accumulate. So, you warned.\n\n``nginxpla`` is config-based utility. It means after first run it create in users home directory folder ``.nginxpla``\nwith config file in yaml format. When you run ``nginxpla`` it loads configuration, such as log_format for \nfile wich you try to analyze and templates with modules. The program is flexible enough \nin configuration to analyze almost any line-by-line logs that can be parsed by regular expressions. \nModular structure with several modules included.\n\n\n1. Installation\n###############\n\n::\n \n pip install nginxpla\n nginxpla --install\n nano ~/.nginxpla/nginxpla.yaml\n\n2. Usage\n########\n\n::\n\n Usage:\n nginxpla <access-log-file> [options]\n nginxpla <access-log-file> [options] (print) <var> ...\n nginxpla (-h | --help)\n nginxpla --version\n\n Options:\n -l <file>, --access-log <file> access log file to parse.\n -f <format>, --log-format <format> log format as specify in log_format directive. [default: combined]\n -i <seconds>, --interval <seconds> report interval when running in --top mode [default: 2.0]\n -t <template>, --template <template> use template from config file [default: main]\n -m <modules>, --modules <modules> comma separated module list [default: all]\n\n --info print configuration info for access_log\n --top watch for new lines as they are written to the access log.\n\n -g <var>, --group-by <var> group by variable [default: ]\n -w <var>, --having <expr> having clause [default: 1]\n -o <var>, --order-by <var> order of output for default query [default: count]\n -n <number>, --limit <number> limit the number of records included in report [default: 10]\n -a <exp> ..., --a <exp> ... add exp (must be aggregation exp: sum, avg, min, max, etc.) into output\n\n -v, --verbose more verbose output\n -d, --debug print every line and parsed record\n -h, --help print this help message.\n --version print version information.\n\n Advanced:\n -c <file>, --config <file> nginxpla config file path.\n -e <filter-expression>, --filter <filter-expression> filter in, records satisfied given expression are processed.\n -p <filter-expression>, --pre-filter <filter-expression> in-filter expression to check in pre-parsing phase.\n -s <sql-request>, --sql <sql-request> raw Sql in sqlite format. 
Table with data is log\n --fields <fields> Fields to import in sqllite log table, for example, --fields user_agent,status\n\n Examples:\n Print statistics for default template\n $ nginxpla access_log\n\n Select All indexed data from base\n $ nginxpla access_log --sql select * from log\n\n Select All indexed data from base\n $ nginxpla access_log --sql 'SELECT user_agent, status, count(1) AS count FROM log GROUP BY user_agent, status ORDER BY count DESC LIMIT 100' --fields user_agent,status\n\nConfiguration\n#############\n\nAfter install configure logs-section:\n\n::\n\n logs:\n mydomain:\n log_path_regexp: 'mydomain\\.access\\.log'\n format: \"default\"\n second_domain_name:\n log_path_regexp: 'second_domain_name\\.access\\.log'\n format: \"custom\"\n fallback_to_combined:\n log_path_regexp: '.*'\n format: \"combined\"\n\nIf you use custom nginx log_format or you want configure something different you can define formats in section:\n\n::\n\n formats:\n default: '$remote_addr - $remote_user [$time_local] \"$request\" $status $body_bytes_sent \"$http_x_forwarded_for\"'\n combined: '$remote_addr - $remote_user [$time_local] \"$request\" $status $body_bytes_sent \"$http_referer\" \"$http_user_agent\"'\n custom: '$http_x_forwarded_for - [$time_local] \"$host\" \"$request\" $status ($bytes_sent) \"$http_referer\" \"$uri $args\" \"$http_user_agent\" [$request_time] [$upstream_response_time]'\n\nImportant: After parse $variables will be columns in databse with same name and you can operate them\n\n``regex_formats``-section do the same as ``formats``. If you regex-guru you can speed-up parse by regex. ``regex_formats`` is prefered than simple way, if defined ``format`` and ``regex_format`` with the same name, ``regex_format`` will be used.\n\nSQL suffix\n**********\n\nFor better visualization I have add suffixes. Just add it to column name in SQL and all row of data will be formatted.\nSql suffix itself will be removed from result table column name.\n\n**_human_size** — size-formatter, convert digits like this 4399151 to this 4,20Mb\n\nExample\n\n::\n\n $ nginxpla access_log --fields request_path,body_bytes_sent query SELECT request_path, sum(body_bytes_sent) as bytes_sent_human_size GROUP BY request_path ORDER BY bytes_sent_human_size DESC LIMIT 10\n\n\nReport Table Column Human Name\n******************************\n\nAll column names from SQL will be transform to string with space-separated words.\nBut in your sql you should use original column names.\n\n::\n\n $ nginxpla access_log --fields se,request_path --filter=\"se=='Google Bot'\" query 'SELECT request_path as request_path_by_google_bot, count(1) as count FROM log GROUP BY request_path ORDER BY count DESC LIMIT 10'\n\n | Request Path By Google Bot | Count |\n |------------------------------+---------|\n | /c/202060826/new | 68 |\n | /c/202060826/discount | 29 |\n | /c/202001900 | 28 |\n | /c/202001107 | 22 |\n | /c/1000008746 | 17 |\n | /c/202060845 | 17 |\n | /c/202000010 | 16 |\n | /c/202061131 | 16 |\n | /c/202062183/new | 16 |\n | /c/202061132 | 15 |\n\n running for 18 seconds, 33923 records processed: 1789.62 req/sec\n\nPrint Format\n************\n\nFor simple queries you can user print syntax:\n\n::\n\n nginxpla <access-log-file> [options] (print) <var> ...\n\nThe print-syntax parser make some useful magick. 
It is ordering and auto results grouping.\nMagick fields is ``count``\n\n::\n\n $ nginxpla access_log --limit=0 print se count\n\nExample\n\n::\n\n # Uses Search Engine Module and Pattern Module\n\n $ nginxpla access_log --filter=\"se != '-'\" --limit=0 print se request_path_pattern count\n\n | Se | Request Path Pattern | Count |\n |--------------+------------------------+---------|\n | Yahoo Slurp | Product | 183522 |\n | Yahoo Slurp | Rubric | 106551 |\n | Yahoo Slurp | Brand | 18200 |\n | Google Bot | Rubric | 17549 |\n | Google Bot | Product | 10959 |\n | Google Bot | Brand | 3019 |\n\n running for 28 seconds, 361730 records processed: 12546.68 req/sec\n\n4. Modules\n----------\n\nPattern Module\n\nAllows to define your request path patterns. For example, in url structure on your project all brands have format like\n``/brand/slug...`` you can group them by pattern:\n\n::\n\n modules:\n pattern:\n package: \"module.pattern\"\n class: \"PatternModule\"\n ...\n options:\n ...\n brand:\n from: '^/brand/.*'\n to: \"Brand\"\n ...\n\nFor full module config see default `config example <https://github.com/evirma/nginxpla/blob/master/nginxpla/config/nginxpla.yaml>`_\n\nAll urls starts with ``/brand/`` will have field ``request_path_pattern`` with value 'Brand' and you can use this\nin you reports, prints or queries\n\n::\n\n $ nginxpla access_log print request_path_pattern count\n\n\nASN Module\n\nUse GeoLite2-ASN.mmdb to get ``asn`` and ``ans_name`` variables to ``record``. ``asn_name`` contains company name from whois\n\nASN Module Config\n\n.. code-block:: yaml\n\n asn:\n label: \"ASN Top:\"\n package: \"module.asn\"\n class: \"AsnModule\"\n fields: \n - asn\n - asn_name\n - remote_addr\n - bytes_sent\n - request_time\n inedxes: \n - asn_name\n sql: | \n SELECT\n asn AS ASN,\n asn_name AS Company,\n count(1) AS Count,\n sum(bytes_sent) AS sum_bytes_sent_human_size,\n sum(request_time) AS total_time,\n avg(request_time) AS avg_time,\n count(CASE WHEN status_type = 2 THEN 1 END) AS '2xx',\n count(CASE WHEN status_type = 3 THEN 1 END) AS '3xx',\n count(CASE WHEN status_type = 4 THEN 1 END) AS '4xx',\n count(CASE WHEN status_type = 5 THEN 1 END) AS '5xx'\n FROM log\n GROUP BY asn_name\n HAVING %(--having)s\n ORDER BY %(--order-by)s DESC\n LIMIT %(--limit)s\n\n\nModule API\n----------\n\nHOW IT WORKS\n\nWhen a string is parsed into variables, they are concatenated into a record. \nFurther, the recording goes in modules (``handle_record``), the module can change or add something to the record. \nAfter that, only part of the record goes to the database. What exactly gets in depends on the key ``fields`` in the settings file, this is needed for optimization.\nThen the ``report`` assembly starts. The report methods are called in the order specified in the config.\nThe ``handle_report`` method is launched using the same algorithm. But, it receives the resulting report as a parameter.\n\n\n- ``record`` - dict parsed from log line\n- ``report`` - text of all reports \n- ``ModuleConfi`` - object with module settings \n\nModule it is just a small Class with 3 methods and contructor.\n\n``handle_record`` - method takes only one parameter ``record`` and must return it back. You can modify it.\n``report`` - text of report, you can use sql to fetch data from db. If you don't like methods from config.store - you can get connection (``config.store.conn()``) and do what you want\n``handle_report`` - takes result report, must return it back\n\nEXAMPLE OF MODULE\n\n.. 
code-block:: python3\n \n \"\"\"\n Simple Module\n\n package: \"module.simple\"\n class: \"SimpleModule\"\n\n \"\"\"\n from nginxpla.utils import generate_table\n from nginxpla.module_config import ModuleConfig\n\n class SimpleModule:\n def handle_record(self, record):\n record['some_variable'] = 'some_value'\n return record\n\n def report(self):\n config = self.config\n [header, data] = config.storage.fetchtable(config.sql, config.arguments)\n return generate_table(header, data)\n\n def handle_report(self, report: str):\n report += \"something to append to the end of entire script's report\"\n return report\n \n def __init__(self, module_config: ModuleConfig):\n self.config = module_config" }, { "alpha_fraction": 0.6068601608276367, "alphanum_fraction": 0.6101583242416382, "avg_line_length": 32.68888854980469, "blob_id": "cc36b25a3411fc1d4f1582125c983304f4a59b8a", "content_id": "0d5412122200b781e0d26f9e5c59c58c9dd4e4e1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1516, "license_type": "permissive", "max_line_length": 99, "num_lines": 45, "path": "/nginxpla/config/module/you_name/demo.py", "repo_name": "evirma/nginxpla", "src_encoding": "UTF-8", "text": "from nginxpla.utils import generate_simple_table\nfrom nginxpla.module_config import ModuleConfig\nfrom nginxpla.nginxpla_module import NginxplaModule\n\n\nclass DemoModule(NginxplaModule):\n \"\"\"\n Demo Nginxpla Module\n\n if \"?\" found in ``request`` fills new variable ``has_query`` (1|0)\n \"\"\"\n def report(self):\n # It means we have not collect data, no data - no report\n if self.is_needed is False:\n return ''\n\n sql = \"SELECT has_query, count(*) as count FROM log GROUP BY has_query ORDER BY count DESC\"\n\n [header, data] = self.config.storage.fetchtable(sql, self.config.arguments)\n return generate_simple_table(header, data)\n\n def handle_record(self, record) -> str:\n # If user command params doesn't need in hash_query param\n if self.is_needed is False:\n # !!! always return record !!!\n return record\n\n # In some runs user we do not have request variable.\n # For example user parse something different from web-server logs\n if 'request' not in record:\n self.is_needed = False\n # !!! always return record !!!\n return record\n\n # If request contains ? that means it is has_query\n if record['request'].find('?') != -1:\n record['has_query'] = 1\n else:\n record['has_query'] = 0\n\n # !!! 
always return record !!!\n return record\n\n def __init__(self, module_config: ModuleConfig):\n self.config = module_config\n" }, { "alpha_fraction": 0.7274472117424011, "alphanum_fraction": 0.7274472117424011, "avg_line_length": 31.5625, "blob_id": "15dc0d2b38659ef4282d99b150decebf55d32a92", "content_id": "c67e7477fa174f162fe78beeaae4fdc2095bd9c1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 521, "license_type": "permissive", "max_line_length": 91, "num_lines": 16, "path": "/nginxpla/module/simple.py", "repo_name": "evirma/nginxpla", "src_encoding": "UTF-8", "text": "from nginxpla.module_config import ModuleConfig\nfrom nginxpla.nginxpla_module import NginxplaModule\nfrom nginxpla.reporter_helper import ReporterHelper\n\n\nclass SimpleModule(NginxplaModule):\n def report(self):\n config = self.config\n return ReporterHelper(config.storage, config.arguments).sql_reports(config.reports)\n\n def handle_record(self, record) -> str:\n self.is_needed = False\n return record\n\n def __init__(self, module_config: ModuleConfig):\n self.config = module_config\n" }, { "alpha_fraction": 0.578109860420227, "alphanum_fraction": 0.587618887424469, "avg_line_length": 28.61494255065918, "blob_id": "71b350d0a7352273b796da0c4c8e900d5e72683f", "content_id": "f9075cbc70b691d8a3fc578469a437381a327cc9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5153, "license_type": "permissive", "max_line_length": 115, "num_lines": 174, "path": "/nginxpla/utils.py", "repo_name": "evirma/nginxpla", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import division, absolute_import\n\nimport os\nimport atexit\nimport signal\nimport curses\nimport tabulate\nfrom shutil import copytree\nfrom nginxpla import HOME\nfrom nginxpla.error import error_exit\nfrom nginxpla.reporter import Reporter\nfrom nginxpla.config import Config\nfrom nginxpla.module_config import ModuleList, ModuleConfig\n\nDATA_DIR = os.path.join(HOME, 'data')\n\n\ndef makedirs(name, mode=0o750):\n try:\n os.makedirs(name, mode)\n except OSError:\n pass\n\n\ndef setup_reporter(reporter: Reporter, interval):\n scr = curses.initscr()\n atexit.register(curses.endwin)\n\n def print_report(sig, frame):\n output = reporter.report()\n\n scr.erase()\n try:\n scr.addstr(output)\n except curses.error:\n pass\n scr.refresh()\n\n signal.signal(signal.SIGALRM, print_report)\n interval = float(interval)\n signal.setitimer(signal.ITIMER_REAL, 0.1, interval)\n\n\ndef install():\n cwd = os.path.dirname(os.path.abspath(__file__))\n dist_dir = cwd + '/config'\n\n if os.path.exists(HOME):\n error_exit(\"Config directory \" + HOME + \" already exists. 
If you want to reinstall, remove the directory \" + HOME)\n\n copytree(dist_dir, HOME)\n\n\ndef generate_table(columns, data):\n column_types = []\n header = []\n for column in columns:\n if column[-11:] == '_human_size':\n column_types.append('human_size')\n column = column[:-11]\n else:\n column_types.append('default')\n\n header.append(human_header(column))\n\n tabledata = []\n for row in data:\n index = 0\n new_row = []\n for ctype in column_types:\n if ctype == 'human_size':\n new_row.append(human_size(row[index]))\n else:\n new_row.append(row[index])\n\n index += 1\n\n tabledata.append(new_row)\n\n return tabulate.tabulate(tabledata, headers=header, tablefmt='orgtbl', floatfmt='.3f',\n colalign=(\"left\", \"right\", \"right\", \"right\", \"right\"), disable_numparse=[0, 1, 2])\n\n\ndef generate_simple_table(columns, data):\n column_types = []\n header = []\n for column in columns:\n if column[-11:] == '_human_size':\n column_types.append('human_size')\n column = column[:-11]\n else:\n column_types.append('default')\n\n header.append(human_header(column))\n\n tabledata = []\n for row in data:\n index = 0\n new_row = []\n for ctype in column_types:\n if ctype == 'human_size':\n new_row.append(human_size(row[index]))\n else:\n new_row.append(row[index])\n\n index += 1\n\n tabledata.append(new_row)\n\n return tabulate.tabulate(tabledata, headers=header, tablefmt='orgtbl', floatfmt='.3f')\n\n\ndef human_size(num, suffix='b'):\n for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:\n if abs(num) < 1024.0:\n return \"%3.1f%s%s\" % (num, unit, suffix)\n num /= 1024.0\n return \"%.1f%s%s\" % (num, 'Yi', suffix)\n\n\ndef human_header(header):\n words = header.split('_')\n words = [word.capitalize() for word in words]\n\n return ' '.join(words)\n\n\ndef command_fields_parser(fields):\n select = []\n group = []\n for field in fields:\n if field == 'statuses':\n select.append(\"count(CASE WHEN status_type = 2 THEN 1 END) AS '2xx'\")\n select.append(\"count(CASE WHEN status_type = 3 THEN 1 END) AS '3xx'\")\n select.append(\"count(CASE WHEN status_type = 4 THEN 1 END) AS '4xx'\")\n select.append(\"count(CASE WHEN status_type = 5 THEN 1 END) AS '5xx'\")\n elif field == 'sum_request_time':\n select.append('sum(request_time) as sum_request_time')\n elif field == 'avg_request_time':\n select.append('avg(request_time) as avg_request_time')\n elif field == 'request_time':\n select.append('sum(request_time) as sum_request_time')\n select.append('avg(request_time) as avg_request_time')\n elif field == 'sum_bytes_sent':\n select.append('sum(bytes_sent) as sum_b_sent_human_size')\n elif field == 'avg_bytes_sent':\n select.append('round(avg(bytes_sent)) as avg_b_sent_human_size')\n elif field == 'bytes_sent':\n select.append('sum(bytes_sent) as sum_b_sent_human_size')\n select.append('round(avg(bytes_sent)) as avg_b_sent_human_size')\n elif field == 'count':\n select.append('count(*) as count')\n else:\n select.append(field)\n group.append(field)\n\n return [select, group]\n\n\ndef load_template_modules(config: Config):\n template = config.template_name\n templates = config.templates()\n modules = []\n for module_name in config.modules():\n module = ModuleConfig(module_name, templates[template]['modules'][module_name], config)\n modules.append(module)\n\n module_list = ModuleList(modules)\n module_list.set_storage(config.storage)\n\n return module_list\n" }, { "alpha_fraction": 0.6249062418937683, "alphanum_fraction": 0.6294073462486267, "avg_line_length": 
26.77083396911621, "blob_id": "4957bb81b0103683a72dd4380230d7baef519794", "content_id": "646f727fb31dfc1c0b3ab909ac49627974071342", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1333, "license_type": "permissive", "max_line_length": 81, "num_lines": 48, "path": "/nginxpla/module/crawler.py", "repo_name": "evirma/nginxpla", "src_encoding": "UTF-8", "text": "from nginxpla.module_config import ModuleConfig\nfrom nginxpla.utils import generate_table\nfrom crawlerdetect import CrawlerDetect\nfrom functools import lru_cache\nfrom nginxpla.module.simple import SimpleModule\n\n\nclass CrawlerModule(SimpleModule):\n def handle_record(self, record):\n if self.is_needed is False:\n return record\n\n required = ['is_crawler', 'crawler']\n\n if self.is_needed is None and not self.config.is_required(set(required)):\n self.is_needed = False\n return record\n\n user_agent_string = record.get('http_user_agent', '')\n\n for k in required:\n record[k] = '-'\n\n if user_agent_string:\n record['crawler'] = self.parse_crawler(user_agent_string)\n # flag the record when a matching bot signature was found\n record['is_crawler'] = 0 if record['crawler'] == '-' else 1\n\n return record\n\n @lru_cache(maxsize=102400)\n def parse_crawler(self, ua):\n if self.crawler_detect().isCrawler(ua):\n return self.crawler.getMatches()\n\n return '-'\n\n def crawler_detect(self):\n if self.crawler is None:\n self.crawler = CrawlerDetect()\n\n return self.crawler\n\n def __init__(self, module_config: ModuleConfig):\n super(CrawlerModule, self).__init__(module_config)\n\n self.crawler_cache = {}\n self.crawler = None\n self.is_needed = None\n self.config = module_config\n" }, { "alpha_fraction": 0.6509901285171509, "alphanum_fraction": 0.6584158539772034, "avg_line_length": 25.933332443237305, "blob_id": "5abfb90422dc049ae0b5e8982bd04839524f8c90", "content_id": "fef705ea06f760e400fc07b9c9d4d95df03a6a64", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 404, "license_type": "permissive", "max_line_length": 62, "num_lines": 15, "path": "/nginxpla/__init__.py", "repo_name": "evirma/nginxpla", "src_encoding": "UTF-8", "text": "__version__ = '0.0.6'\n__aka__ = 'Nginx Python Log Analyser'\n\nimport os\nimport sys\n\nVERSION = \"{} <{}> v{}\".format(__name__, __aka__, __version__)\n\nsys.path.append(os.path.dirname(os.path.realpath(__file__)))\n\nHOME = os.path.expanduser(\"~/.nginxpla\")\nCUSTOM_MODULES_DIR = os.path.expanduser(\"~/.nginxpla/module\")\nCONFIG_FILE = HOME + '/nginxpla.yaml'\n\nsys.path.append(os.path.dirname(CUSTOM_MODULES_DIR))\n" }, { "alpha_fraction": 0.5979653596878052, "alphanum_fraction": 0.5990957021713257, "avg_line_length": 29.505746841430664, "blob_id": "6971ff12864f22fd4243c947cda34bcb2e2c244f", "content_id": "316dfcf29fcfc658455ab5882b348e99ebf6bced", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2654, "license_type": "permissive", "max_line_length": 92, "num_lines": 87, "path": "/nginxpla/module_config.py", "repo_name": "evirma/nginxpla", "src_encoding": "UTF-8", "text": "from nginxpla.storage import LogStorage\nimport sys\nfrom nginxpla import CUSTOM_MODULES_DIR\n\ntry:\n # Python 3\n from collections.abc import MutableSequence\nexcept ImportError:\n # Python 2.7\n from collections import MutableSequence\n\n\nclass ModuleConfig(object):\n def __init__(self, name, module_config, config):\n self.name = name\n self.module_config = module_config\n self.config = config\n self.storage = False\n self.arguments = config.arguments\n self.sql 
= '' if 'sql' not in module_config else module_config['sql']\n self.reports = [] if 'reports' not in module_config else module_config['reports']\n\n self.fields = set([])\n if 'fields' in module_config:\n self.fields = set(module_config['fields'])\n\n self.indexes = set([])\n if 'indexes' in module_config:\n self.indexes = set(module_config['indexes'])\n\n self.label = '' if 'label' not in module_config else module_config['label']\n self.package = '' if 'package' not in module_config else module_config['package']\n self.class_name = '' if 'class' not in module_config else module_config['class']\n self.options = False if 'options' not in module_config else module_config['options']\n self.instance = False\n\n def factory(self):\n if self.instance is False:\n sys.path.append(CUSTOM_MODULES_DIR)\n mod = __import__(self.package, fromlist=[self.class_name])\n self.instance = getattr(mod, self.class_name)(self)\n return self.instance\n\n def get_fields(self):\n return self.fields\n\n def set_storage(self, storage: LogStorage):\n self.storage = storage\n\n def is_required(self, search) -> bool:\n fields = set(self.config.fields)\n if not fields:\n return False\n\n if isinstance(search, set):\n return bool(fields.intersection(search))\n else:\n for field in fields:\n if field == search:\n return True\n\n return False\n\n\nclass ModuleList(list):\n def __init__(self, *args):\n list.__init__(self, *args)\n\n def set_storage(self, storage: LogStorage):\n for module in self:\n module.set_storage(storage)\n\n def fields(self):\n fields = set([])\n for module in self:\n if module.fields:\n fields = fields.union(set(module.fields))\n\n return fields\n\n def indexes(self):\n indexes = set([])\n for module in self:\n if module.indexes:\n indexes = indexes.union(set(module.indexes))\n\n return indexes\n" }, { "alpha_fraction": 0.5678233504295349, "alphanum_fraction": 0.5725551843643188, "avg_line_length": 27.81818199157715, "blob_id": "f8f5a22e2b343e5dae57a1b8edf6a498cbc51c8f", "content_id": "156ef8da290a57e0baee084d071e7da86b45f661", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1268, "license_type": "permissive", "max_line_length": 119, "num_lines": 44, "path": "/setup.py", "repo_name": "evirma/nginxpla", "src_encoding": "UTF-8", "text": "from setuptools import setup\n\nfrom nginxpla import __version__\n\nsetup(\n name='nginxpla',\n version=__version__,\n description='Small and powerful real-time python nginx access log parser and analyzer with top-like style support',\n long_description=open('README.rst').read(),\n license='MIT',\n url='https://github.com/evirma/nginxpla',\n author='Eugene Myazin',\n author_email='eugene.myazin@gmail.com',\n classifiers=[\n 'License :: OSI Approved :: MIT License',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.2',\n 'Programming Language :: Python :: 3.3',\n ],\n keywords='cli monitoring nginx log access_log system',\n packages=[\"nginxpla\", \"nginxpla/module\"],\n install_requires=[\n 'docopt',\n 'tabulate',\n 'pyyaml',\n 'geoip2',\n 'pyparsing',\n 'crawlerdetect',\n 'ua-parser',\n 'user-agents',\n 'tqdm'\n ],\n package_data={\n \"nginxpla\": [\"config/*\", \"config/module/you_name/*\"]\n },\n entry_points={\n 'console_scripts': [\n 'nginxpla = nginxpla.__main__:main',\n ],\n },\n)\n" }, { "alpha_fraction": 0.5429838299751282, "alphanum_fraction": 
0.5493446588516235, "avg_line_length": 28.816091537475586, "blob_id": "1035f4062985d8c59c02948bc7a4ad4369ebbe8", "content_id": "0416daf0a794b2e05002887f9fd5cc71fbcc30b6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5188, "license_type": "permissive", "max_line_length": 88, "num_lines": 174, "path": "/nginxpla/processor.py", "repo_name": "evirma/nginxpla", "src_encoding": "UTF-8", "text": "\"\"\"\nLog File Lines Processor\n\"\"\"\nimport sys\nimport time\nimport re\nimport os\n\nfrom nginxpla.config import Config\nfrom nginxpla.module_config import ModuleList\n\ntry:\n import urlparse\nexcept ImportError:\n import urllib.parse as urlparse\n\n\ndef seek_n_lines(f, n):\n assert n >= 0\n pos, lines = n + 1, []\n while len(lines) <= n:\n try:\n f.seek(-pos, 2)\n except IOError:\n # the file is shorter than the requested offset; read it from the start\n f.seek(0)\n break\n finally:\n lines = list(f)\n pos *= 2\n return lines\n\n\ndef tail(the_file):\n \"\"\"\n Get the tail of a given file\n \"\"\"\n with open(the_file) as f:\n f.seek(0, os.SEEK_END)\n try:\n f.seek(f.tell() - 100000, os.SEEK_SET)\n except IOError:\n f.seek(0, os.SEEK_END)\n except ValueError:\n f.seek(0, os.SEEK_END)\n\n while True:\n line = f.readline()\n if not line:\n time.sleep(0.1) # sleep briefly before trying again\n continue\n yield line\n\n\ndef build_source(access_log, arguments):\n # constructing log source\n if access_log == 'stdin':\n lines = sys.stdin\n elif arguments['--top']:\n lines = tail(access_log)\n else:\n lines = open(access_log)\n return lines\n\n\ndef records_transformer(records, handlers, config: Config):\n for record in records:\n aliases = config.aliases()\n for alias in aliases:\n if alias in record and aliases[alias] not in record:\n record[aliases[alias]] = record[alias]\n\n if 'status' in record:\n record['status'] = int(record['status'])\n\n if 'bytes_sent' in record:\n record['bytes_sent'] = int(record['bytes_sent'])\n\n if 'request_time' in record:\n record['request_time'] = float(record['request_time'])\n\n if 'upstream_response_time' in record:\n urt = record['upstream_response_time']\n record['upstream_response_time'] = 0 if urt == '-' else float(urt)\n\n if config.is_field_needed('status') or config.is_field_needed('status_type'):\n if 'status' in record:\n record['status_type'] = int(record['status']) // 100\n else:\n record['status_type'] = '-'\n\n if config.is_field_needed('bytes_sent'):\n if 'bytes_sent' not in record:\n if 'body_bytes_sent' in record:\n record['bytes_sent'] = int(record['body_bytes_sent'])\n else:\n record['bytes_sent'] = 0\n\n if config.is_field_needed('method') or config.is_field_needed('request_path'):\n if 'request_uri' in record:\n record['method'] = '-'\n record['request_path'] = record['request_uri']\n elif 'request' in record:\n request_parts = record['request'].split(' ')\n uri = ' '.join(request_parts[1:-1])\n\n record['method'] = ''.join(request_parts[0:1])\n record['request_path'] = ''.join(uri.split('?')[0])\n else:\n record['method'] = '-'\n record['request_path'] = '-'\n\n try:\n for handler in handlers:\n if handler.is_needed is not False:\n record = handler.handle_record(record)\n except ValueError:\n pass\n\n yield record\n\n\ndef parse_log(lines, pattern, config: Config, modules: ModuleList):\n handlers = set([])\n for module in modules:\n handlers.add(module.factory())\n\n matches = (pattern.match(line) for line in lines)\n records = (m.groupdict() for m in matches if m is not None)\n records = 
records_transformer(records, handlers, config)\n\n    return records\n\n\nclass Processor:\n    REGEX_SPECIAL_CHARS = r'([\\.\\*\\+\\?\\|\\(\\)\\{\\}\\[\\]])'\n    REGEX_LOG_FORMAT_VARIABLE = r'\\$([a-zA-Z0-9\\_]+)'\n\n    def __init__(self, config: Config, modules: ModuleList):\n        self.config = config\n        self.modules = modules\n\n    def process(self):\n        config = self.config\n        access_log = config.access_log\n        lines = build_source(access_log, config.arguments)\n\n        log_format_regex = config.match_log_format(access_log, 'regex_formats')\n\n        if log_format_regex:\n            pattern = re.compile(log_format_regex)\n        else:\n            log_format = config.match_log_format(access_log, 'formats')\n            pattern = self.build_pattern(log_format)\n\n        pre_filter_exp = config.arguments['--pre-filter']\n        if pre_filter_exp:\n            lines = (line for line in lines if eval(pre_filter_exp, {}, dict(line=line)))\n\n        records = parse_log(lines, pattern, config, self.modules)\n\n        filter_exp = config.arguments['--filter']\n\n        if filter_exp:\n            records = (r for r in records if eval(filter_exp, {}, r))\n\n        config.storage.import_records(records)\n\n    def build_pattern(self, log_format):\n        pattern = re.sub(self.REGEX_SPECIAL_CHARS, r'\\\\\\1', log_format)\n        pattern = re.sub(self.REGEX_LOG_FORMAT_VARIABLE, '(?P<\\\\1>.*)', pattern)\n\n        return re.compile(pattern)\n" }, { "alpha_fraction": 0.6185566782951355, "alphanum_fraction": 0.6185566782951355, "avg_line_length": 25.94444465637207, "blob_id": "0e033649d8ef6cd785a16dcf09dab8dc8491b9f2", "content_id": "d48dd5c4d9dba6fe0859ac573254515c189d53d9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 970, "license_type": "permissive", "max_line_length": 81, "num_lines": 36, "path": "/nginxpla/module/referer.py", "repo_name": "evirma/nginxpla", "src_encoding": "UTF-8", "text": "from nginxpla.module_config import ModuleConfig\nfrom urllib.parse import urlparse\nfrom nginxpla.module.simple import SimpleModule\n\n\nclass RefererModule(SimpleModule):\n    def handle_record(self, record):\n        if self.is_needed is False:\n            return record\n\n        required = ['referer_domain', 'referer']\n\n        if self.is_needed is None and not self.config.is_required(set(required)):\n            self.is_needed = False\n            return record\n\n        record['referer_domain'] = '-'\n\n        if 'referer' not in record:\n            return record\n\n        referer = record['referer']\n\n        if referer == '-':\n            return record\n\n        parsed_uri = urlparse(referer)\n        record['referer_domain'] = '{uri.netloc}'.format(uri=parsed_uri)\n\n        return record\n\n    def __init__(self, module_config: ModuleConfig):\n        super(RefererModule, self).__init__(module_config)\n\n        self.is_needed = None\n        self.config = module_config\n" }, { "alpha_fraction": 0.5620553493499756, "alphanum_fraction": 0.5691699385643005, "avg_line_length": 27.11111068725586, "blob_id": "350d2d78fd04d23f922817e9c6ad87058c6a19e9", "content_id": "645afd4eaac9343b36b5c47de6fc4ec110676f19", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1265, "license_type": "permissive", "max_line_length": 81, "num_lines": 45, "path": "/nginxpla/module/search_engine.py", "repo_name": "evirma/nginxpla", "src_encoding": "UTF-8", "text": "from nginxpla.module_config import ModuleConfig\nfrom nginxpla.module.simple import SimpleModule\nfrom functools import lru_cache\n\n\nclass SearchEngineModule(SimpleModule):\n    def handle_record(self, record):\n        if self.is_needed is False:\n            return record\n\n        required = ['se']\n        if self.is_needed is None and not 
self.config.is_required(set(required)):\n self.is_needed = False\n return record\n\n ua = record.get('http_user_agent', None)\n if not ua:\n record['se'] = '-'\n else:\n record['se'] = self.get_search_engine_by_ua(ua)\n\n return record\n\n @lru_cache(maxsize=102400)\n def get_search_engine_by_ua(self, ua):\n options = self.config.options\n se = '-'\n for p in options['engines'].values():\n if self.search(ua, p['searches']):\n se = p['title']\n break\n return se\n\n @staticmethod\n def search(what, where):\n for s in where:\n if what.find(s) != -1:\n return 1\n return 0\n\n def __init__(self, module_config: ModuleConfig):\n super(SearchEngineModule, self).__init__(module_config)\n\n self.is_needed = None\n self.config = module_config\n" }, { "alpha_fraction": 0.6345381736755371, "alphanum_fraction": 0.6345381736755371, "avg_line_length": 16.785715103149414, "blob_id": "5ca0c3a1f253ff2494577e4448874730ddbc8f69", "content_id": "64ceebc77ce40e1c33743bbecc2c36f9d1d71d9d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 249, "license_type": "permissive", "max_line_length": 48, "num_lines": 14, "path": "/nginxpla/nginxpla_module.py", "repo_name": "evirma/nginxpla", "src_encoding": "UTF-8", "text": "from abc import ABCMeta, abstractmethod\n\n\nclass NginxplaModule:\n __metaclass__ = ABCMeta\n\n is_needed = None\n\n @abstractmethod\n def report(self) -> str:\n pass\n\n def handle_report(self, report: str) -> str:\n return report\n" }, { "alpha_fraction": 0.6110695600509644, "alphanum_fraction": 0.6140613555908203, "avg_line_length": 30.83333396911621, "blob_id": "9f11a0ef1ac5ee1746bdbd8f53175a1502229c10", "content_id": "fbbcb49028a2355e81f81c7d710f1626fc01944f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1337, "license_type": "permissive", "max_line_length": 90, "num_lines": 42, "path": "/nginxpla/module/pattern.py", "repo_name": "evirma/nginxpla", "src_encoding": "UTF-8", "text": "import re\nfrom nginxpla.error import error_exit\nfrom nginxpla.module_config import ModuleConfig\nfrom nginxpla.module.simple import SimpleModule\n\n\nclass PatternModule(SimpleModule):\n def handle_record(self, record):\n if self.is_needed is False:\n return record\n\n if self.is_needed is None and not self.config.is_required('request_path_pattern'):\n self.is_needed = False\n return record\n\n request_path_pattern = record.get('request_path', None)\n\n if request_path_pattern is None:\n record['request_path_pattern'] = '-'\n return record\n\n options = self.config.options\n\n if not options:\n error_exit(\"Options not found\")\n\n sections = int(options['sections'])\n if sections > 0:\n parts = request_path_pattern.split('/', sections + 1)[1:sections + 1]\n request_path_pattern = '/' + '/'.join(parts) + '...'\n\n for p in options['replaces'].values():\n request_path_pattern = re.sub(p['from'], p['to'], request_path_pattern)\n\n record['request_path_pattern'] = request_path_pattern\n return record\n\n def __init__(self, module_config: ModuleConfig):\n super(PatternModule, self).__init__(module_config)\n\n self.is_needed = None\n self.config = module_config\n" }, { "alpha_fraction": 0.5703042149543762, "alphanum_fraction": 0.5703042149543762, "avg_line_length": 29.32903289794922, "blob_id": "c79acdf65f3f9844e4e13e6727e36d372277bfbf", "content_id": "1ddb76e0a33a935b5438827c96a802b8845f1544", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 4701, "license_type": "permissive", "max_line_length": 82, "num_lines": 155, "path": "/nginxpla/config.py", "repo_name": "evirma/nginxpla", "src_encoding": "UTF-8", "text": "import os\nimport re\nimport yaml\nimport os.path\n\nfrom nginxpla.error import error_exit\nfrom nginxpla import HOME, CONFIG_FILE\nfrom nginxpla.storage import LogStorage\n\n\nclass Config(object):\n def __init__(self, config_file: None, arguments):\n self.access_log = arguments['<access-log-file>']\n\n if self.access_log != 'stdin' and not os.path.exists(self.access_log):\n error_exit('access log file \"%s\" does not exist' % self.access_log)\n\n if config_file is None:\n config_file = CONFIG_FILE\n\n self.config_file = config_file\n if not os.path.exists(config_file):\n error_exit('nginxpla config file not found: %s' % config_file)\n\n requested_modules = arguments['--modules']\n\n if requested_modules == 'all':\n self.requested_modules = None\n else:\n self.requested_modules = requested_modules.split(',')\n\n self.arguments = arguments\n self.template_name = arguments['--template']\n self.config = self._load_config(config_file)\n self.fields = self._collect_fields()\n self.indexes = self._collect_indexes()\n self.storage = LogStorage(self.fields, self.indexes)\n\n def is_field_needed(self, field):\n return field in self.fields\n\n def fields_union(self, fields: set):\n self.fields = self.fields.union(fields)\n\n def modules(self):\n template = self.template(self.template_name)\n\n result = []\n for module in template['modules']:\n if self.requested_modules is None or module in self.requested_modules:\n result.append(module)\n\n return result\n\n def templates(self):\n return self.config.get('templates', [])\n\n def template(self, name: str):\n templates = self.templates()\n if name in templates:\n return templates[name]\n\n return {}\n\n def aliases(self):\n result = {}\n\n aliases = self.get('aliases', [])\n for result_value in self.get('aliases', []):\n for alias in aliases[result_value]:\n result[alias] = result_value\n\n return result\n\n def get(self, key, default=None):\n return self.config.get(key, default)\n\n @staticmethod\n def user_default_config():\n return HOME + '/nginxpla.yaml'\n\n @staticmethod\n def _load_config(config_file):\n cfg = None\n if os.path.isfile(config_file):\n with open(config_file) as f:\n cfg = yaml.safe_load(f)\n\n return cfg\n\n def _collect_indexes(self) -> set:\n indexes = self.fields\n\n template = self.template(self.template_name)\n for module in self.modules():\n if 'indexes' in template['modules'][module]:\n module_fields = template['modules'][module]['indexes']\n indexes = indexes.union(set(module_fields))\n\n return indexes.intersection(self.fields)\n\n def _collect_fields(self) -> set:\n fields = set([])\n if self.arguments['--fields']:\n fields = fields.union(self.arguments['--fields'].split(','))\n\n if not self.arguments['<var>']:\n template = self.template(self.template_name)\n for module in self.modules():\n if 'fields' in template['modules'][module]:\n module_fields = template['modules'][module]['fields']\n fields = fields.union(set(module_fields))\n\n fields = fields.union(self._collect_fields_from_var())\n\n return fields\n\n def _collect_fields_from_var(self):\n fields = self.arguments['<var>']\n\n result = []\n disabled_fields = ['count', 'statuses']\n for field in fields:\n if field not in disabled_fields:\n result.append(field)\n elif field == 'statuses':\n result.append('status')\n result.append('status_type')\n\n return set(result)\n\n def 
match_log_format(self, access_log, format_section: 'format'):\n formats = self.get(format_section, [])\n if not formats:\n return ''\n\n if self.arguments['--log-format']:\n format_name = self.arguments['--log-format']\n else:\n format_name = ''\n\n logs = self.get('logs', [])\n for log_section in logs:\n log_data = logs[log_section]\n if re.search(log_data['log_path_regexp'], access_log):\n format_name = logs[log_section]['format']\n break\n\n if format_name == '':\n format_name = 'combined'\n\n if format_name in formats:\n return str(formats[format_name])\n\n return ''\n" }, { "alpha_fraction": 0.5785340070724487, "alphanum_fraction": 0.5785340070724487, "avg_line_length": 23.645160675048828, "blob_id": "eb727fd64ac33aad1457e7d4898e724acbf31d88", "content_id": "c1a76648fe5dbbb52b0d8fff75e268b788515f0f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 764, "license_type": "permissive", "max_line_length": 60, "num_lines": 31, "path": "/nginxpla/reporter.py", "repo_name": "evirma/nginxpla", "src_encoding": "UTF-8", "text": "from __future__ import print_function\n\nfrom nginxpla.config import Config\nfrom nginxpla.module_config import ModuleList\n\n\nclass Reporter:\n def __init__(self, config: Config, modules: ModuleList):\n self.config = config\n self.modules = modules\n\n def report(self):\n if not self.config.storage.is_started():\n return ''\n\n output = []\n\n for module in self.modules:\n label = module.label\n if label:\n label += \"\\n\"\n\n module_report = module.factory().report()\n output.append('%s%s' % (label, module_report))\n\n report = ''.join(output)\n\n for module in self.modules:\n report = module.factory().handle_report(report)\n\n return report\n" }, { "alpha_fraction": 0.5168706178665161, "alphanum_fraction": 0.5228031277656555, "avg_line_length": 31.10714340209961, "blob_id": "5abeed3d951344afbaab54f1f988f62a1f6813e1", "content_id": "45ceac20f1adf8e9f954ff8ff376ccde865dd110", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2697, "license_type": "permissive", "max_line_length": 81, "num_lines": 84, "path": "/nginxpla/module/user_agent.py", "repo_name": "evirma/nginxpla", "src_encoding": "UTF-8", "text": "from nginxpla.module_config import ModuleConfig\nfrom nginxpla.utils import generate_table\nfrom user_agents import parse\nfrom nginxpla.nginxpla_module import NginxplaModule\nfrom functools import lru_cache\n\n\nclass UserAgentModule(NginxplaModule):\n def report(self):\n config = self.config\n\n if not config.sql:\n return ''\n\n [header, data] = config.storage.fetchtable(config.sql, config.arguments)\n return generate_table(header, data)\n\n def handle_record(self, record):\n if self.is_needed is False:\n return record\n\n required = ['browser',\n 'browser_version',\n 'os',\n 'os_version',\n 'device',\n 'device_brand',\n 'device_version',\n 'is_mobile',\n 'is_tablet',\n 'is_touch_capable',\n 'is_pc',\n 'is_bot',\n 'device_type']\n\n if self.is_needed is None and not self.config.is_required(set(required)):\n self.is_needed = False\n return record\n\n user_agent_string = record.get('http_user_agent', '')\n\n for k in required:\n record[k] = '-'\n\n if user_agent_string:\n ua = self.parse_ua(user_agent_string)\n\n record['browser'] = ua.browser.family\n record['browser_version'] = ua.browser.version_string\n\n record['os'] = ua.os.family\n record['os_version'] = ua.os.version_string\n\n record['device'] = ua.device.family\n 
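# brand and model come from the user_agents device parser and may be None for generic agents\n            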
record['device_brand'] = ua.device.brand\n record['device_model'] = ua.device.model\n\n record['is_mobile'] = 1 if ua.is_mobile else 0\n record['is_tablet'] = 1 if ua.is_tablet else 0\n record['is_touch_capable'] = 1 if ua.is_touch_capable else 0\n record['is_pc'] = 1 if ua.is_pc else 0\n record['is_bot'] = 1 if ua.is_bot else 0\n\n if record['is_tablet']:\n record['device_type'] = 'tablet'\n elif record['is_mobile']:\n record['device_type'] = 'mobile'\n elif record['is_touch_capable']:\n record['device_type'] = 'touch'\n elif record['is_pc']:\n record['device_type'] = 'pc'\n elif record['is_bot']:\n record['device_type'] = 'bot'\n\n return record\n\n @lru_cache(maxsize=102400)\n def parse_ua(self, ua):\n return parse(ua)\n\n def __init__(self, module_config: ModuleConfig):\n self.ua_cache = {}\n self.is_needed = None\n self.config = module_config\n" }, { "alpha_fraction": 0.6181262731552124, "alphanum_fraction": 0.6237270832061768, "avg_line_length": 29.21538543701172, "blob_id": "9833d6c9aa27972ed7a77dafa4de958354f25a32", "content_id": "d4a3c57a2cae6b42cfe5ddae9211563eaca291fb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1964, "license_type": "permissive", "max_line_length": 101, "num_lines": 65, "path": "/nginxpla/module/asn.py", "repo_name": "evirma/nginxpla", "src_encoding": "UTF-8", "text": "import os\nfrom nginxpla.utils import generate_table\nfrom nginxpla.module_config import ModuleConfig\nfrom nginxpla.nginxpla_module import NginxplaModule\nfrom nginxpla.reporter_helper import ReporterHelper\nfrom functools import lru_cache\nimport geoip2.database\nfrom geoip2.errors import AddressNotFoundError\n\n\nclass AsnModule(NginxplaModule):\n def report(self):\n if self.is_needed is False or not self.file_exists:\n return ''\n\n config = self.config\n return ReporterHelper(config.storage, config.arguments).sql_reports(config.reports)\n\n def handle_record(self, record):\n if self.is_needed is False:\n return record\n\n required = ['asn', 'asn_name']\n if self.is_needed is None and not self.config.is_required(set(required)):\n self.is_needed = False\n return record\n\n record['asn'] = '-'\n record['asn_name'] = '-'\n\n if 'remote_addr' not in record:\n return record\n\n if self.file_exists:\n [asn, asn_name] = self.get_asn(record['remote_addr'])\n record['asn'] = asn\n record['asn_name'] = asn_name\n\n return record\n\n def handle_report(self, report: str):\n return report\n\n @lru_cache(maxsize=102400)\n def get_asn(self, ip):\n try:\n with geoip2.database.Reader(self.file) as reader:\n response = reader.asn(ip)\n result = [response.autonomous_system_number, response.autonomous_system_organization]\n except AddressNotFoundError:\n result = ['-', '-']\n\n return result\n\n def __init__(self, module_config: ModuleConfig):\n options = module_config.options\n\n self.file = False\n self.file_exists = False\n\n if options and 'geolite2_asn_file' in options:\n self.file = options['geolite2_asn_file']\n self.file_exists = os.path.exists(self.file)\n\n self.config = module_config\n" }, { "alpha_fraction": 0.6281259059906006, "alphanum_fraction": 0.6307951807975769, "avg_line_length": 34.23762512207031, "blob_id": "173ce3b6e9a86cf7ba27b409453949828006cd1d", "content_id": "149544048426fc299cd2015d372001e9d3faa33a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7118, "license_type": "permissive", "max_line_length": 119, "num_lines": 202, "path": 
"/nginxpla/__main__.py", "repo_name": "evirma/nginxpla", "src_encoding": "UTF-8", "text": "\"\"\"nginxpla - nginx access log analyzer.\n\nUsage:\n nginxpla <access-log-file> [options]\n nginxpla <access-log-file> [options] (print) <var> ...\n nginxpla <access-log-file> [options] (query) <query> ...\n nginxpla [options]\n nginxpla (-h | --help)\n nginxpla --install\n nginxpla --version\n\nOptions:\n -l <file>, --access-log <file> access log file to parse.\n -f <format>, --log-format <format> log format as specify in log_format directive. [default: ]\n -i <seconds>, --interval <seconds> report interval when running in --top mode [default: 2.0]\n -t <template>, --template <template> use template from config file [default: main]\n -m <modules>, --modules <modules> comma separated module list [default: all]\n\n --info print configuration info for access_log\n --top watch for new lines as they are written to the access log.\n\n --fields <fields> Fields to import in sqlite3 log table, for example, --fields user_agent,status\n -g <var>, --group-by <var> group by variable [default: ]\n -w <var>, --having <expr> having clause [default: 1]\n -o <var>, --order-by <var> order of output for default query [default: count]\n -n <number>, --limit <number> limit the number of records included in report [default: 10]\n\n -v, --verbose more verbose output\n -d, --debug print every line and parsed record\n -h, --help print this help message.\n --version print version information.\n\n Advanced:\n -c <file>, --config <file> nginxpla config file path.\n -e <filter-expression>, --filter <filter-expression> filter in, records satisfied given expression are processed.\n -p <filter-expression>, --pre-filter <filter-expression> in-filter expression to check in pre-parsing phase.\n\nExamples:\n Show reports for main template\n $ nginxpla access_log\n\n Show reports for seo template\n $ nginxpla access_log --template seo\n\n Print reports for main template asn and referrer modules only\n $ nginxpla access_log --template seo --modules asn,referer\n\n Print report table for request_path counts\n $ nginxpla access_log print request_path count\n\n Select All indexed data from base\n $ nginxpla access_log query select * from log\n\n Select User Agent Statuses\n $ nginxpla access_log query 'SELECT user_agent, status, count(1) AS count FROM\n log GROUP BY user_agent, status ORDER BY count DESC LIMIT 100' --fields user_agent,status\n\n Average body bytes sent of 200 responses of requested path begin with '/catalog':\n $ nginxpla access_log --filter 'status == 200 and request_path.startswith(\"/catalog\")'\n\n Analyze apache access log from remote machine using 'common' log format\n $ ssh remote tail -f /var/log/apache2/access.log | nginxpla -f custom\n\"\"\"\nfrom __future__ import print_function, absolute_import\nimport sys\nimport time\nimport logging\nfrom docopt import docopt\nfrom nginxpla.error import message_exit\nfrom nginxpla.config import Config\nfrom nginxpla.reporter import Reporter\nfrom nginxpla.processor import Processor\nfrom nginxpla.utils import command_fields_parser, generate_simple_table, install, setup_reporter, load_template_modules\n\n\ndef process(arguments):\n start = time.time()\n\n if arguments['--install']:\n install()\n from nginxpla import CONFIG_FILE\n message_exit(\"Config installed successful. 
Please, edit %s\" % CONFIG_FILE)\n\n if arguments['<access-log-file>'] is None and not sys.stdin.isatty():\n arguments['<access-log-file>'] = 'stdin'\n\n #\n # Loading and parse config file\n # It's init modules too\n #\n config = Config(arguments['--config'], arguments)\n modules = load_template_modules(config)\n\n logging.debug(\"config file: %s\", arguments['--config'])\n logging.debug(\"config: %s\", config)\n\n #\n # Init Storage and set storage to each ModuleConfig\n #\n if arguments['--info']:\n access_log = arguments['<access-log-file>']\n log_format_regex = config.match_log_format(access_log, 'regex_formats')\n\n print('Config File: %s' % config.config_file, end=\"\\n\\n\")\n print('Log File: %s' % arguments['<access-log-file>'], end=\"\\n\\n\")\n print('Template: %s' % config.template_name)\n print('Modules: %s' % ','.join(config.modules()))\n print('Fields: %s' % ','.join(config.fields))\n\n if log_format_regex:\n print('Log Format RegExp: %s' % log_format_regex)\n else:\n log_format = config.match_log_format(access_log, 'formats')\n print('Log Format: %s' % log_format)\n else:\n output = ''\n reporter = Reporter(config, modules)\n\n if arguments['--top'] and not arguments['print']:\n setup_reporter(reporter, arguments['--interval'])\n\n Processor(config, modules).process()\n\n if arguments['print']:\n print_command_builder(arguments, config.storage)\n elif arguments['query']:\n [header, data] = config.storage.fetchtable(' '.join(arguments['<query>']))\n output += generate_simple_table(header, data)\n else:\n output += reporter.report()\n print()\n\n runtime = time.time() - start\n lines = config.storage.count()\n\n output = output.rstrip(\"\\n\")\n output += \"\\n\\n > running for %i seconds, %i records processed: %.2f req/sec\" % \\\n (runtime, lines, lines / runtime)\n\n print(output)\n\n\ndef print_command_builder(arguments, storage):\n fields = arguments['<var>']\n [select, group_by] = command_fields_parser(fields)\n\n params = {'--select-fields': \",\\n\\t\".join(select),\n '--having': arguments['--having'],\n '--limit': int(arguments['--limit'])}\n\n if 'count' in fields and arguments['--group-by'] == 'count':\n params['--group-by'] = 'count'\n elif arguments['--group-by'] != '':\n params['--group-by'] = arguments['--group-by']\n else:\n params['--group-by'] = \",\\n\\t\".join(group_by)\n\n if 'count' in fields and arguments['--order-by'] == 'count':\n params['--order-by'] = 'count'\n else:\n params['--order-by'] = arguments['--order-by']\n\n query = \"SELECT\\n\\t%(--select-fields)s\\nFROM log\"\n\n if params['--group-by']:\n query += \"\\nGROUP BY %(--group-by)s\"\n\n if params['--having']:\n query += \"\\nHAVING %(--having)s\"\n\n if params['--order-by']:\n query += \"\\nORDER BY %(--order-by)s DESC\"\n\n if params['--limit']:\n query += \"\\nLIMIT %(--limit)s\"\n\n logging.debug(\"SQL:\\n%s\", (query % params))\n\n [header, data] = storage.fetchtable(query, params)\n print(generate_simple_table(header, data))\n\n\ndef main():\n from nginxpla import VERSION\n args = docopt(__doc__, version=VERSION)\n\n log_level = logging.WARNING\n if args['--verbose']:\n log_level = logging.INFO\n if args['--debug']:\n log_level = logging.DEBUG\n logging.basicConfig(level=log_level, format='%(levelname)s: %(message)s')\n logging.debug('arguments:\\n%s', args)\n\n try:\n process(args)\n except KeyboardInterrupt:\n sys.exit(0)\n\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.5277628302574158, "alphanum_fraction": 0.5331536531448364, "avg_line_length": 
28.44444465637207, "blob_id": "2ec3f39a65cf12668cee25fd112af47be404b4a3", "content_id": "67d387fcf1b7a9d2261f13129ebe29f8cfc5b17e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1855, "license_type": "permissive", "max_line_length": 115, "num_lines": 63, "path": "/nginxpla/reporter_helper.py", "repo_name": "evirma/nginxpla", "src_encoding": "UTF-8", "text": "from __future__ import print_function\n\nimport tabulate\nfrom nginxpla.storage import LogStorage\nfrom nginxpla.utils import human_header, human_size\n\n\nclass ReporterHelper:\n def __init__(self, storage: LogStorage, arguments):\n self.storage = storage\n self.arguments = arguments\n\n def sql_reports(self, reports):\n output = ''\n for report_name in reports:\n sql = reports[report_name].get('sql', '')\n label = reports[report_name].get('label', '')\n\n if sql:\n output += self.sql_report(label, sql, self.arguments)\n\n return output\n\n def sql_report(self, label, sql, arguments):\n [header, data] = self.storage.fetchtable(sql, arguments)\n\n output = ''\n\n if label:\n output += label + \"\\n\\n\"\n\n output += self.generate_table(header, data) + \"\\n\\n\"\n return output\n\n @staticmethod\n def generate_table(columns, data):\n column_types = []\n header = []\n for column in columns:\n if column[-11:] == '_human_size':\n column_types.append('human_size')\n column = column[:-11]\n else:\n column_types.append('default')\n\n header.append(human_header(column))\n\n tabledata = []\n for row in data:\n index = 0\n new_row = []\n for ctype in column_types:\n if ctype == 'human_size':\n new_row.append(human_size(row[index]))\n else:\n new_row.append(row[index])\n\n index += 1\n\n tabledata.append(new_row)\n\n return tabulate.tabulate(tabledata, headers=header, tablefmt='orgtbl', floatfmt='.3f',\n colalign=(\"left\", \"right\", \"right\", \"right\", \"right\"), disable_numparse=[0, 1, 2])\n" }, { "alpha_fraction": 0.5623847842216492, "alphanum_fraction": 0.5666871666908264, "avg_line_length": 33.61701965332031, "blob_id": "683b1eb2656b9f0c48a997a21154dbf88fe1411c", "content_id": "56dbf32583abfe58bcf57d638bbceb1eb9069bd4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3254, "license_type": "permissive", "max_line_length": 112, "num_lines": 94, "path": "/nginxpla/storage.py", "repo_name": "evirma/nginxpla", "src_encoding": "UTF-8", "text": "import sqlite3\nimport logging\nimport sys\nfrom nginxpla.error import error_exit\nfrom contextlib import closing\nfrom tqdm import tqdm\n\n\nclass LogStorage(object):\n def fetchall(self, query, arguments):\n with closing(self.conn.cursor()) as cursor:\n cursor.execute(query % arguments)\n data = cursor.fetchall()\n return data\n\n def fetchone(self, query, arguments):\n with closing(self.conn.cursor()) as cursor:\n cursor.execute(query % arguments)\n return cursor.fetchone()[0]\n\n def fetchtable(self, query, arguments=None):\n if arguments is None:\n arguments = []\n\n with closing(self.conn.cursor()) as cursor:\n cursor.execute(query % arguments)\n columns = (d[0] for d in cursor.description)\n data = cursor.fetchall()\n\n return [columns, data]\n\n def init_db(self):\n create_table = 'create table log (%s)' % self.column_list\n with closing(self.conn.cursor()) as cursor:\n logging.info('sqlite init: %s', create_table)\n cursor.execute(create_table)\n for idx, field in enumerate(self.indexes):\n sql = 'create index log_idx%d on log (%s)' % (idx, field)\n 
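# one index per configured field, named log_idx0, log_idx1, ...\n                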
logging.info('sqlite init: %s', sql)\n cursor.execute(sql)\n\n def count(self):\n with closing(self.conn.cursor()) as cursor:\n cursor.execute('select count(1) from log')\n return cursor.fetchone()[0]\n\n def __init__(self, fields, indexes=None):\n self.started = False\n self.report_queries = None\n self.indexes = indexes if indexes is not None else []\n self.fields = set(fields)\n\n if len(fields) == 0:\n error_exit(\"Field list to import in sqlite3 is empty\")\n\n self.column_list = ','.join(fields)\n self.holder_list = ','.join(':%s' % var for var in fields)\n self.conn = sqlite3.connect(':memory:', isolation_level='DEFERRED')\n self.init_db()\n\n def conn(self):\n self.conn.cursor().execute('''PRAGMA synchronous = OFF''')\n self.conn.cursor().execute('''PRAGMA journal_mode = OFF''')\n return self.conn\n\n def import_records(self, records):\n sql = 'insert into log (%s) values (%s)' % (self.column_list, self.holder_list)\n\n bulk = []\n\n logging.debug('sqlite insert: %s', sql)\n with closing(self.conn.cursor()) as cursor:\n cursor.execute('PRAGMA synchronous = OFF')\n cursor.execute('PRAGMA journal_mode = OFF')\n for r in tqdm(records, unit=' lines', leave=False):\n if not set(self.fields).issubset(r):\n diff = self.fields.difference(r)\n sys.stderr.write('Import Records Failed: field \"%s\" not found in record\\n' % ','.join(diff))\n sys.exit(1)\n\n self.started = True\n\n if len(bulk) >= 10000:\n cursor.executemany(sql, bulk)\n self.conn.commit()\n bulk = []\n\n bulk.append({k: r[k] for k in self.fields})\n\n cursor.executemany(sql, bulk)\n self.conn.commit()\n\n def is_started(self):\n return self.started\n" } ]
20
mike123sxyocr/PHP_Memo
https://github.com/mike123sxyocr/PHP_Memo
9937134c0a95504d1758a3f18cc4f3218fac9cce
79b936622b700c95395fc9119f85850ca9ffd4e0
9bc1844fa45bc6b5db069aba868ed50d0cf9915c
refs/heads/master
2018-11-15T21:52:56.337781
2018-10-02T13:10:18
2018-10-02T13:10:18
146,541,018
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6523178815841675, "alphanum_fraction": 0.6639072895050049, "avg_line_length": 16.794116973876953, "blob_id": "2d9c7a2674401fc351664a96b9ebd1d61dc15555", "content_id": "12c3cc43c0e9a0d020596957e7a405d354b23d7b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 604, "license_type": "no_license", "max_line_length": 38, "num_lines": 34, "path": "/fileSplit.py", "repo_name": "mike123sxyocr/PHP_Memo", "src_encoding": "UTF-8", "text": "import os,sys\n\ndataSize=int()\ndata=bytes()\n\nif __name__ == \"__main__\":\n\tif len(sys.argv)!=3:\n\t\tprint(\"check your arguments please\")\n\t\tos._exit(0)\n\t\n\tfilename = str(sys.argv[1])\n\tspiltNO = int(sys.argv[2])\n\tfile=open(filename,\"rb\")\n\t\n\t#print(filename,spiltNO)\n\tfileSize=os.path.getsize(filename)\n\tsize=fileSize//spiltNO\n\t\n\tprint(fileSize)\n\tprint(size)\n\t\n\tfor x in range(spiltNO):\n\t\tdataSize=size\n\t\tif x == (spiltNO-1):\n\t\t\tdataSize=fileSize-(x*size)\n\t\t\t#print(x+1)\n\n\t\tprint(dataSize)\n\t\tdata=file.read(dataSize)\n\t\toutputfile=open(str(x+1),\"wb\")\n\t\toutputfile.write(data)\n\t\toutputfile.close()\n\t\t\n\tfile.close()" }, { "alpha_fraction": 0.6350210905075073, "alphanum_fraction": 0.6455696225166321, "avg_line_length": 18.70833396911621, "blob_id": "58190d8f976dc8e5d388194a6faf85eb67cb1605", "content_id": "bb2a3267fae8fe0339250c5ce56295fef6f816a5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 474, "license_type": "no_license", "max_line_length": 38, "num_lines": 24, "path": "/fileMerge.py", "repo_name": "mike123sxyocr/PHP_Memo", "src_encoding": "UTF-8", "text": "import os,sys\n\ndataSize=int()\n#splitdatas=[]\nsplitfile=\"\"\n\nif __name__ == \"__main__\":\n\tif len(sys.argv)<=3:\n\t\tprint(\"check your arguments please\")\n\t\tsys.exit(0)\n\t\n\tfilename = str(sys.argv[1])\n\toutputfile = open(filename,\"wb\")\n\t\n\tfor x in range((len(sys.argv)-2)):\n\t\tsplitfile = str(sys.argv[x+2])\n\t\tif not os.path.exists(splitfile):\n\t\t\tprint(splitfile+\" is not exist!\")\n\t\t\texit()\n\t\t\n\t\tinutfile=open(splitfile,\"rb\")\n\t\toutputfile.write(inutfile.read())\n\t\n\toutputfile.close()\n\t" } ]
2
virgilio21/test-JDango
https://github.com/virgilio21/test-JDango
c3743d2150ed9204cdf3cd78abcd262d457fa328
216b893a96c30574ee1eba1e6453264522ae1202
98777257d1bcfaa9d8eb5e4285937b775fbe5a9a
refs/heads/master
2020-05-14T22:07:56.126874
2019-05-06T16:41:26
2019-05-06T16:41:26
181,974,298
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6388415694236755, "alphanum_fraction": 0.647031843662262, "avg_line_length": 38.13589859008789, "blob_id": "a2c5a5dd8e673bd865f9ca050a8f31de0533f0a4", "content_id": "6a0c73b72b0f8ec720302eda42095eddbeeace10", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15262, "license_type": "no_license", "max_line_length": 107, "num_lines": 390, "path": "/pointsale/views.py", "repo_name": "virgilio21/test-JDango", "src_encoding": "UTF-8", "text": "#Django\nfrom django.shortcuts import render\nfrom django.http import Http404\n#Models\nfrom.models import Provider, Products, Users, Employees\nfrom django.contrib.auth.models import User\n#Serializers\nfrom.serializers import *\n#rest framework\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom rest_framework import status\n#Authentication\nfrom rest_framework.authtoken.models import Token\nfrom rest_framework.authentication import SessionAuthentication, BasicAuthentication, TokenAuthentication\nfrom rest_framework.permissions import IsAuthenticated\n# Create your views here.\n\n#Provider Views\nclass ProviderList(APIView):\n authentication_classes = (TokenAuthentication,)\n permission_classes = (IsAuthenticated,)\n #Retorna todos los datos del modelo provider\n def get(self, request, format=None):\n listProvider = Provider.objects.all()\n serializer = ProviderSerializer(listProvider, many=True)\n return Response(serializer.data)\n\n #Crea un objeto provider\n def post(self,request, format=None):\n serializer = ProviderSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\nclass ProviderDetail(APIView):\n authentication_classes = (TokenAuthentication,)\n permission_classes = (IsAuthenticated,)\n def get_object(self, pk):\n try:\n return Provider.objects.get(pk=pk)\n except Provider.DoesNotExist:\n raise Http404\n\n def get(self,request,pk,format=None):\n provider = self.get_object(pk)\n serializer = ProviderSerializer(provider)\n return Response(serializer.data)\n\n def put(self,request,pk,format=None):\n provider = self.get_object(pk=pk)\n serializer = ProviderSerializer(provider, data = request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.data, status=status.HTTP_400_BAD_REQUEST)\n \n def delete(self,request,pk,format=None):\n provider = self.get_object(pk=pk)\n provider.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n#Product Views\nclass ProductList(APIView):\n authentication_classes = (TokenAuthentication,)\n permission_classes = (IsAuthenticated,)\n def get(self, request, format=None):\n user=User.objects.get(id=1)\n token = Token.objects.get_or_create(user=user)\n print(token[0])\n listProduct = Products.objects.all()\n serializer = ProductViewSerializer(listProduct, many=True)\n return Response(serializer.data)\n \n def post(self, request, format=None):\n serializer = ProductSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors,status=status.HTTP_400_BAD_REQUEST)\n\nclass ProductDetail(APIView):\n authentication_classes = (TokenAuthentication,)\n permission_classes = (IsAuthenticated,) \n def get_object(self, pk):\n try:\n 
# .select_related('id_provider') is not necessary; the serializer handles the nested relation\n            product = Products.objects.get(pk=pk)\n            return product\n        except Products.DoesNotExist:\n            raise Http404\n\n    def get(self,request,pk,format=None):\n        product = self.get_object(pk)\n        serializer = ProductSerializer(product)\n        return Response(serializer.data)\n    \n    def put(self,request,pk,format=None):\n        product = self.get_object(pk)\n        serializer = ProductSerializer(product, data = request.data)\n        if serializer.is_valid():\n            serializer.save()\n            return Response(serializer.data)\n        return Response(serializer.errors, status = status.HTTP_400_BAD_REQUEST)\n    \n    def delete(self,request,pk,format=None):\n        product = self.get_object(pk)\n        serializer = ProductSerializer(product, data = request.data)\n        if serializer.is_valid():\n            serializer.save()\n            return Response(serializer.data)\n        return Response(status=status.HTTP_204_NO_CONTENT)\n\n# Users Views\n\nclass UserList(APIView):\n    authentication_classes = (TokenAuthentication,)\n    permission_classes = (IsAuthenticated,)\n    def get(self, request, format=None):\n        users = Users.objects.all()\n        serializer = UsersSerializer(users, many=True)\n        return Response(serializer.data)\n\n    def post(self, request, format=None):\n        serializer = UsersSerializer(data = request.data)\n        if serializer.is_valid():\n            serializer.save()\n            return Response(serializer.data, status=status.HTTP_201_CREATED)\n        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\nclass UserDetail(APIView):\n    authentication_classes = (TokenAuthentication,)\n    permission_classes = (IsAuthenticated,)\n    def get_object(self, pk):\n        try:\n            return Users.objects.get(pk=pk)\n        except Users.DoesNotExist:\n            raise Http404\n\n    def get(self,request,pk,format=None):\n        user = self.get_object(pk)\n        serializer = UsersSerializer(user)\n        return Response(serializer.data)\n\n    def put(self,request,pk,format=None):\n        user = self.get_object(pk)\n        serializer = UsersSerializer(user, data = request.data)\n        if serializer.is_valid():\n            serializer.save()\n            return Response(serializer.data)\n        return Response(status=status.HTTP_400_BAD_REQUEST)\n\n    def delete(self,request,pk,format=None):\n        user = self.get_object(pk)\n        user.delete()\n        return Response(status=status.HTTP_204_NO_CONTENT)\n\n#Employees view\nclass EmployeList(APIView):\n    authentication_classes = (TokenAuthentication,)\n    permission_classes = (IsAuthenticated,)\n    def get(self, request, format=None):\n        employee = Employees.objects.all()\n        serializer = EmployeesViewSerializer(employee, many=True)\n        return Response(serializer.data)\n\n    def post(self, request, format=None):\n        serializer = EmployeesSerializer(data = request.data)\n        if serializer.is_valid():\n            serializer.save()\n            return Response(serializer.data, status=status.HTTP_201_CREATED)\n        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\nclass EmployesDetail(APIView):\n    authentication_classes = (TokenAuthentication,)\n    permission_classes = (IsAuthenticated,)\n    def get_object(self, pk):\n        try:\n            return Employees.objects.get(pk=pk)\n        except Employees.DoesNotExist:\n            raise Http404\n\n    def get(self,request,pk,format=None):\n        employee = self.get_object(pk)\n        serializer = EmployeesViewSerializer(employee)\n        return Response(serializer.data)\n\n    def put(self,request,pk,format=None):\n        employee = self.get_object(pk)\n        serializer = EmployeesSerializer(employee, data = request.data)\n        if serializer.is_valid():\n            serializer.save()\n            return 
Response(serializer.data)\n        return Response(status=status.HTTP_400_BAD_REQUEST)\n\n    def delete(self,request,pk,format=None):\n        employee = self.get_object(pk)\n        serializer = EmployeesSerializer(employee, data = request.data)\n        if serializer.is_valid():\n            serializer.save()\n            return Response(serializer.data)\n        return Response(status=status.HTTP_204_NO_CONTENT)\n\nclass HistoricalList(APIView):\n    authentication_classes = (TokenAuthentication,)\n    permission_classes = (IsAuthenticated,)\n    def get(self, request, format=None):\n        historical = Historical.objects.all()\n        serializer = HistoricalViewSerializer(historical, many=True)\n        return Response(serializer.data)\n\n    def post(self, request, format=None):\n        serializer = HistoricalSerializer(data = request.data)\n        if serializer.is_valid():\n            serializer.save()\n            return Response(serializer.data, status=status.HTTP_201_CREATED)\n        return Response(status=status.HTTP_400_BAD_REQUEST)\n\nclass HistoricalDetail(APIView):\n    authentication_classes = (TokenAuthentication,)\n    permission_classes = (IsAuthenticated,)\n    def get_object(self, pk):\n        try:\n            historical = Historical.objects.get(pk=pk)\n            return historical\n        except Historical.DoesNotExist:\n            raise Http404\n\n    def get(self,request,pk,format=None):\n        historical = self.get_object(pk)\n        serializer = HistoricalViewSerializer(historical)\n        return Response(serializer.data)\n\n    def put(self,request,pk,format=None):\n        historical = self.get_object(pk)\n        serializer = HistoricalSerializer(historical, data = request.data)\n        if serializer.is_valid():\n            serializer.save()\n            return Response(serializer.data)\n        return Response(status=status.HTTP_400_BAD_REQUEST)\n\n    def delete(self,request,pk,format=None):\n        historical = self.get_object(pk)\n        historical.delete()\n        return Response(status=status.HTTP_204_NO_CONTENT)\n\n\nclass CustomerList(APIView):\n    authentication_classes = (TokenAuthentication,)\n    permission_classes = (IsAuthenticated,)\n    def get(self, request, format=None):\n        customer = Customers.objects.all()\n        serializer = CustomersSerializer(customer, many=True)\n        return Response(serializer.data)\n\n    def post(self, request, format=None):\n        serializer = CustomersSerializer(data = request.data)\n        if serializer.is_valid():\n            serializer.save()\n            return Response(serializer.data, status=status.HTTP_201_CREATED)\n        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\nclass CustomerDetail(APIView):\n    authentication_classes = (TokenAuthentication,)\n    permission_classes = (IsAuthenticated,)\n    def get_object(self, pk):\n        try:\n            return Customers.objects.get(pk=pk)\n        except Customers.DoesNotExist:\n            raise Http404\n\n    def get(self,request,pk,format=None):\n        customer = self.get_object(pk)\n        serializer = CustomersSerializer(customer)\n        return Response(serializer.data)\n\n    def put(self,request,pk,format=None):\n        customer = self.get_object(pk)\n        serializer = CustomersSerializer(customer, data = request.data)\n        if serializer.is_valid():\n            serializer.save()\n            return Response(serializer.data)\n        return Response(status=status.HTTP_400_BAD_REQUEST)\n\n    def delete(self,request,pk,format=None):\n        customer = self.get_object(pk)\n        serializer = CustomersSerializer(customer, data = request.data)\n        if serializer.is_valid():\n            serializer.save()\n            return Response(serializer.data)\n        return Response(status=status.HTTP_204_NO_CONTENT) \n\n    \nclass SaleList(APIView):\n    authentication_classes = (TokenAuthentication,)\n    permission_classes = (IsAuthenticated,)\n    def get(self, request, format=None):\n        sale = Sales.objects.all()\n        
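# the *View* serializer is used for read responses in this file; the plain SalesSerializer handles writes\n        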
 serializer = SalesViewSerializer(sale, many=True)\n return Response(serializer.data)\n\n def post(self, request, format=None):\n serializer = SalesSerializer(data = request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\nclass SaleDetail(APIView):\n authentication_classes = (TokenAuthentication,)\n permission_classes = (IsAuthenticated,)\n def get_object(self, pk):\n try:\n return Sales.objects.get(pk=pk)\n except Sales.DoesNotExist:\n raise Http404\n\n def get(self,request,pk,format=None):\n sale = self.get_object(pk)\n serializer = SalesViewSerializer(sale)\n return Response(serializer.data)\n\n def put(self,request,pk,format=None):\n sale = self.get_object(pk)\n serializer = SalesSerializer(sale, data = request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(status=status.HTTP_400_BAD_REQUEST)\n\n def delete(self,request,pk,format=None):\n sale = self.get_object(pk)\n sale.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\nclass Product_has_SaleList(APIView):\n authentication_classes = (TokenAuthentication,)\n permission_classes = (IsAuthenticated,)\n def get(self, request, format=None):\n product_has_sale = Product_has_Sale.objects.all()\n serializer = Product_has_SaleViewSerializer(product_has_sale, many=True)\n return Response(serializer.data)\n\n def post(self, request, format=None):\n serializer = Product_has_SaleSerializer(data = request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\nclass Product_has_SaleDetail(APIView):\n authentication_classes = (TokenAuthentication,)\n permission_classes = (IsAuthenticated,)\n def get_object(self, pk):\n try:\n return Product_has_Sale.objects.get(pk=pk)\n except Product_has_Sale.DoesNotExist:\n raise Http404\n\n def get(self,request,pk,format=None):\n product_has_Sale = self.get_object(pk)\n serializer = Product_has_SaleViewSerializer(product_has_Sale)\n return Response(serializer.data)\n\n def put(self,request,pk,format=None):\n product_has_Sale = self.get_object(pk)\n serializer = Product_has_SaleSerializer(product_has_Sale, data = request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(status=status.HTTP_400_BAD_REQUEST)\n\n def delete(self,request,pk,format=None):\n product_has_Sale = self.get_object(pk)\n product_has_Sale.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)" }, { "alpha_fraction": 0.5428345799446106, "alphanum_fraction": 0.5529016852378845, "avg_line_length": 43.05217361450195, "blob_id": "9300a245e9ed73c4c3f446eb5f3b4a4ea459ee64", "content_id": "ea2bfa92b73b1bba0232a405e926a9b9870aebc7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5066, "license_type": "no_license", "max_line_length": 123, "num_lines": 115, "path": "/pointsale/migrations/0001_initial.py", "repo_name": "virgilio21/test-JDango", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.5 on 
2019-05-05 17:32\n\nimport django.contrib.postgres.fields.jsonb\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Customers',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('customer_name', models.CharField(max_length=45)),\n ('customer_phone', models.CharField(max_length=15)),\n ('status', models.BooleanField()),\n ],\n ),\n migrations.CreateModel(\n name='Employees',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=45)),\n ('las_name', models.CharField(max_length=50)),\n ('rfc', models.CharField(max_length=45)),\n ('direction', models.CharField(max_length=100, null=True)),\n ('birthdate', models.DateField(null=True)),\n ('passwd', models.CharField(max_length=45)),\n ('telephone', models.CharField(max_length=15, null=True)),\n ('status', models.BooleanField()),\n ],\n ),\n migrations.CreateModel(\n name='Historical',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('description', models.CharField(max_length=45)),\n ('date', models.DateTimeField(auto_now=True)),\n ('employee', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pointsale.Employees')),\n ],\n ),\n migrations.CreateModel(\n name='Product_has_Sale',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('quantity', models.SmallIntegerField()),\n ('status', models.BooleanField()),\n ],\n ),\n migrations.CreateModel(\n name='Products',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('product_name', models.CharField(max_length=45)),\n ('price', models.DecimalField(decimal_places=10, max_digits=19)),\n ('amount', models.SmallIntegerField()),\n ('description', django.contrib.postgres.fields.jsonb.JSONField()),\n ('status', models.BooleanField()),\n ('image', models.ImageField(null=True, upload_to=None)),\n ],\n ),\n migrations.CreateModel(\n name='Provider',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=45)),\n ('number', models.IntegerField()),\n ],\n ),\n migrations.CreateModel(\n name='Sales',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('total_price', models.DecimalField(decimal_places=10, max_digits=19)),\n ('sale_date', models.DateField()),\n ('status', models.BooleanField()),\n ('id_customers', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pointsale.Customers')),\n ('id_employee', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pointsale.Employees')),\n ],\n ),\n migrations.CreateModel(\n name='Users',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('user_type', models.CharField(max_length=45)),\n ('description', models.CharField(max_length=450)),\n ],\n ),\n migrations.AddField(\n model_name='products',\n name='id_provider',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pointsale.Provider'),\n ),\n migrations.AddField(\n model_name='product_has_sale',\n name='id_product',\n 
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pointsale.Products'),\n ),\n migrations.AddField(\n model_name='product_has_sale',\n name='id_sale',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pointsale.Sales'),\n ),\n migrations.AddField(\n model_name='employees',\n name='user',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pointsale.Users'),\n ),\n ]\n" }, { "alpha_fraction": 0.6957681775093079, "alphanum_fraction": 0.7106366753578186, "avg_line_length": 40.650794982910156, "blob_id": "cbf977823f8c123386d883a8fdd165610e848f5a", "content_id": "aefbc2da56ab05c5de2454f630abefa8dfe7427f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2623, "license_type": "no_license", "max_line_length": 135, "num_lines": 63, "path": "/pointsale/models.py", "repo_name": "virgilio21/test-JDango", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.contrib.postgres.fields import JSONField\n\n# Create your models here.\nclass Products(models.Model):\n product_name = models.CharField(max_length=45)\n price = models.DecimalField(max_digits=19, decimal_places=10)\n amount = models.SmallIntegerField()\n description = JSONField()\n status = models.BooleanField()\n image = models.ImageField(upload_to=None, height_field=None, width_field=None, max_length=100, null=True)\n id_provider = models.ForeignKey('Provider', on_delete=models.CASCADE)\n\n def __str__(self):\n return \"Nombre {} - precio{} - descripcion {} - status {}\".format(self.product_name, self.price, self.description, self.status)\n\nclass Provider(models.Model):\n name = models.CharField(max_length=45)\n number = models.IntegerField()\n\n def __str__(self):\n return \"Nombre: {} ------ Numero: {}\".format(self.name,self.number)\n\nclass Employees(models.Model):\n name = models.CharField(max_length=45)\n las_name = models.CharField(max_length=50)\n rfc = models.CharField(max_length=45)\n direction = models.CharField(max_length=100, null=True)\n birthdate = models.DateField(auto_now=False, auto_now_add=False, null=True)\n passwd = models.CharField(max_length=45)\n telephone = models.CharField(max_length=15, null=True)\n status = models.BooleanField()\n user = models.ForeignKey('Users', on_delete=models.CASCADE)\n\n def __str__(self):\n return \"Nombre : {} --------- status: {}\".format(self.name, self.status)\n\nclass Users(models.Model):\n user_type = models.CharField(max_length=45)\n description = models.CharField(max_length=450)\n\nclass Historical(models.Model):\n description = models.CharField(max_length=45)\n date = models.DateTimeField(auto_now=True, auto_now_add=False)\n employee = models.ForeignKey('Employees', on_delete=models.CASCADE)\n\nclass Product_has_Sale(models.Model):\n id_product = models.ForeignKey('Products', on_delete=models.CASCADE)\n id_sale = models.ForeignKey('Sales', on_delete=models.CASCADE)\n quantity = models.SmallIntegerField()\n status = models.BooleanField()\n\nclass Sales(models.Model):\n total_price = models.DecimalField(max_digits=19,decimal_places=10)\n sale_date = models.DateField()\n id_employee = models.ForeignKey('Employees', on_delete=models.CASCADE)\n id_customers = models.ForeignKey('Customers', on_delete=models.CASCADE)\n status = models.BooleanField()\n\nclass Customers(models.Model):\n customer_name = models.CharField(max_length=45)\n customer_phone = models.CharField(max_length=15)\n status = models.BooleanField()" }, { "alpha_fraction": 
0.6843971610069275, "alphanum_fraction": 0.6843971610069275, "avg_line_length": 30.33333396911621, "blob_id": "2fcf7efaadac845ba1d93f6ca2aaa34075011c78", "content_id": "9a6d3bea3197d2624f2eda76c1f9b7fc90b70ec8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 564, "license_type": "no_license", "max_line_length": 83, "num_lines": 18, "path": "/pointsale/admin.py", "repo_name": "virgilio21/test-JDango", "src_encoding": "UTF-8", "text": "from django.contrib import admin\n\n#models\nfrom .models import *\nfrom django.contrib.auth.models import User \n\n# Register your models here.\n@admin.register(Provider)\nclass ProviderAdmin(admin.ModelAdmin):\n list_display=('pk', 'name', 'number',)\n list_display_links = ('pk',)\n search_fields = ('pk','name', 'number')\n\n@admin.register(Products)\nclass ProductAdmin(admin.ModelAdmin):\n list_display=('pk', 'product_name', 'price', 'amount', 'status', 'id_provider')\n list_display_links = ('pk', 'product_name')\n search_fields = ('pk','product_name',)\n" }, { "alpha_fraction": 0.6485730409622192, "alphanum_fraction": 0.6485730409622192, "avg_line_length": 45.290157318115234, "blob_id": "7b1ba10066a28cd9d24754b3374de78530da25f2", "content_id": "86cd25c599913dcf0968cd5a5fc72305755e7eb9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8935, "license_type": "no_license", "max_line_length": 334, "num_lines": 193, "path": "/pointsale/serializers.py", "repo_name": "virgilio21/test-JDango", "src_encoding": "UTF-8", "text": "from rest_framework import serializers\nfrom .models import *\nfrom django.db import models\n\n#Authentication\nfrom rest_framework.authtoken.models import Token\n\nclass ProviderSerializer(serializers.ModelSerializer):\n class Meta:\n model = Provider\n fields = ('id','name','number')\n\n def create(self, validated_data):\n return Provider.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n instance.name = validated_data.get('name', instance.name)\n instance.number = validated_data.get('number', instance.number)\n instance.save()\n return instance\n\nclass ProductViewSerializer(serializers.ModelSerializer):\n id_provider = ProviderSerializer(many = False, read_only=True)\n\n class Meta:\n model= Products\n fields=('id',\"product_name\",'price','amount', 'description', 'status', 'image','id_provider')\n \nclass ProductSerializer(serializers.ModelSerializer):\n class Meta:\n model = Products\n fields=('id',\"product_name\",'price','amount', 'description', 'status', 'image','id_provider')\n \n def create(self, validated_data):\n products = Products(\n product_name=validated_data['product_name'],\n price=validated_data['price'],\n amount=validated_data['amount'],\n description=validated_data['description'],\n status=validated_data['status'],\n image=validated_data['image'],\n id_provider=validated_data['id_provider']\n )\n products.save()\n return products\n\n def update(self, instance, validated_data):\n instance.product_name = validated_data.get('product_name', instance.product_name)\n instance.price = validated_data.get('price', instance.price)
 \n instance.amount = validated_data.get('amount', instance.amount)\n instance.description = validated_data.get('description', instance.description)\n instance.status = validated_data.get('status', instance.status)\n instance.image = validated_data.get('image', instance.image)\n instance.id_provider = validated_data.get('id_provider', instance.id_provider)\n instance.save()\n return instance\n\nclass UsersSerializer(serializers.ModelSerializer):\n class Meta:\n model = Users\n fields = ('id','user_type','description')\n\nclass EmployeesSerializer(serializers.ModelSerializer):\n class Meta:\n model = Employees\n fields = ('id','name','las_name','rfc','direction','birthdate','passwd','telephone','status','user')\n\n def create(self, validated_data):\n employees = Employees(\n name=validated_data['name'],\n las_name=validated_data['las_name'],\n rfc=validated_data['rfc'],\n direction=validated_data['direction'],\n birthdate=validated_data['birthdate'],\n passwd=validated_data['passwd'],\n telephone=validated_data['telephone'],\n status=validated_data['status'],\n user=validated_data['user']\n )\n employees.save()\n return employees\n\n def update(self, instance, validated_data):\n instance.name = validated_data.get('name', instance.name)\n instance.las_name = validated_data.get('las_name', instance.las_name)\n instance.rfc = validated_data.get('rfc', instance.rfc)\n instance.direction = validated_data.get('direction', instance.direction)\n instance.birthdate = validated_data.get('birthdate', instance.birthdate)\n instance.passwd = validated_data.get('passwd', instance.passwd)\n instance.telephone = validated_data.get('telephone', instance.telephone)\n instance.status = validated_data.get('status', instance.status)\n instance.user = validated_data.get('user', instance.user)\n instance.save()\n return instance\n\nclass EmployeesViewSerializer(serializers.ModelSerializer):\n user = UsersSerializer(many=False, read_only=True)\n\n class Meta:\n model=Employees\n fields = ('id','name','las_name','rfc','direction','birthdate','telephone','status','user')\n\nclass HistoricalSerializer(serializers.ModelSerializer):\n class Meta:\n model = Historical\n fields = ('id','description','date','employee')\n\n def create(self, validated_data):\n historical=Historical(description=validated_data['description'], date=validated_data['date'], employee=validated_data['employee'])\n historical.save()\n return historical\n \n def update(self,instance, validated_data):\n instance.description = validated_data.get('description', instance.description)\n instance.date = validated_data.get('date', instance.date)\n instance.employee = validated_data.get('employee', instance.employee)\n instance.save()\n return instance\n\nclass HistoricalViewSerializer(serializers.ModelSerializer):\n employee = EmployeesViewSerializer(many=False, read_only=True)\n\n class Meta:\n model = Historical\n fields = ('id','description','date','employee')\n\nclass CustomersSerializer(serializers.ModelSerializer):\n class Meta:\n model = Customers\n fields = ('id','customer_name','customer_phone','status')\n\n def create(self, validated_data):\n customer=Customers(customer_name=validated_data['customer_name'], customer_phone=validated_data['customer_phone'], status=validated_data['status'])\n customer.save()\n return customer\n \n def update(self,instance, validated_data):\n instance.customer_name = validated_data.get('customer_name', instance.customer_name)\n instance.customer_phone = validated_data.get('customer_phone', instance.customer_phone)\n instance.status = validated_data.get('status',
 instance.status)\n instance.save()\n return instance\n\nclass SalesSerializer(serializers.ModelSerializer):\n class Meta:\n model = Sales\n fields = ('id','total_price','sale_date','id_employee','id_customers','status')\n\n def create(self, validated_data):\n sale=Sales(total_price=validated_data['total_price'], sale_date=validated_data['sale_date'], id_employee=validated_data['id_employee'],id_customers=validated_data['id_customers'],status=validated_data['status'])\n sale.save()\n return sale\n \n def update(self,instance, validated_data):\n instance.total_price = validated_data.get('total_price', instance.total_price)\n instance.sale_date = validated_data.get('sale_date', instance.sale_date)\n instance.id_employee = validated_data.get('id_employee', instance.id_employee)\n instance.id_customers = validated_data.get('id_customers', instance.id_customers)\n instance.status = validated_data.get('status', instance.status)\n instance.save()\n return instance\n\nclass SalesViewSerializer(serializers.ModelSerializer):\n id_employee = EmployeesViewSerializer(many=False, read_only=True)\n id_customers= CustomersSerializer(many=False, read_only=True)\n\n class Meta:\n model = Sales\n fields = ('id','total_price','sale_date','id_employee','id_customers','status')\n\nclass Product_has_SaleSerializer(serializers.ModelSerializer):\n class Meta:\n model = Product_has_Sale\n fields = ('id','id_product','id_sale','quantity','status')\n\n def create(self, validated_data):\n product_has_sale=Product_has_Sale(id_product=validated_data['id_product'], id_sale=validated_data['id_sale'], quantity=validated_data['quantity'],status=validated_data['status'])\n product_has_sale.save()\n return product_has_sale\n \n def update(self,instance, validated_data):\n instance.id_product = validated_data.get('id_product', instance.id_product)\n instance.id_sale = validated_data.get('id_sale', instance.id_sale)\n instance.quantity = validated_data.get('quantity', instance.quantity)\n instance.status = validated_data.get('status', instance.status)\n instance.save()\n return instance\n\nclass Product_has_SaleViewSerializer(serializers.ModelSerializer):\n id_product = ProductViewSerializer(many=False, read_only=True)\n id_sale= SalesViewSerializer(many=False, read_only=True)\n\n class Meta:\n model = Product_has_Sale\n fields = ('id','id_product','id_sale','quantity','status')\n\n" }, { "alpha_fraction": 0.7634408473968506, "alphanum_fraction": 0.7634408473968506, "avg_line_length": 18.600000381469727, "blob_id": "c133c422aac8779db9d8d9a683c2267ae10f339c", "content_id": "99f3031523df168502f26963952c71be514efd1f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 93, "license_type": "no_license", "max_line_length": 33, "num_lines": 5, "path": "/pointsale/apps.py", "repo_name": "virgilio21/test-JDango", "src_encoding": "UTF-8", "text": "from django.apps import AppConfig\n\n\nclass PointsaleConfig(AppConfig):\n name = 'pointsale'\n" }, { "alpha_fraction": 0.6935622096061707, "alphanum_fraction": 0.6935622096061707, "avg_line_length": 45.63999938964844, "blob_id": "a17778b119478eca2aa8aa8ae567b41d6548879a", "content_id": "b9ab741f6127d684861f746c1691b5509f032f01", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1165, "license_type": "no_license", "max_line_length": 78, "num_lines": 25, "path": "/pointsale/urls.py", "repo_name": "virgilio21/test-JDango", "src_encoding": "UTF-8", "text": "from django.urls import path, include\nfrom .
 import views\nfrom rest_framework import routers\nfrom rest_framework.urlpatterns import format_suffix_patterns\n\nurlpatterns = [\n path('Providers/',views.ProviderList.as_view()),\n path('Providers/<int:pk>',views.ProviderDetail.as_view()),\n path('Products/',views.ProductList.as_view()),\n path('Products/<int:pk>',views.ProductDetail.as_view()),\n path('Users/', views.UserList.as_view()),\n path('Users/<int:pk>', views.UserDetail.as_view()),\n path('Employees/', views.EmployeList.as_view()),\n path('Employees/<int:pk>', views.EmployesDetail.as_view()),\n path('Historicals/', views.HistoricalList.as_view()),\n path('Historicals/<int:pk>', views.HistoricalDetail.as_view()),\n path('Customers/', views.CustomerList.as_view()),\n path('Customers/<int:pk>', views.CustomerDetail.as_view()),\n path('Sales/', views.SaleList.as_view()),\n path('Sales/<int:pk>', views.SaleDetail.as_view()),\n path('Product_has_sale/', views.Product_has_SaleList.as_view()),\n path('Product_has_sale/<int:pk>', views.Product_has_SaleDetail.as_view()),\n]\n\nurlpatterns = format_suffix_patterns(urlpatterns)" } ]
7
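The DRF views in the entry above all sit behind TokenAuthentication, so every request needs an Authorization header carrying a token. Below is a minimal client sketch (not part of the repo): the base URL, port, and token value are placeholder assumptions for a local dev server, and the third-party requests library is assumed to be installed.

import requests  # assumed dependency, not part of the repo

BASE = 'http://localhost:8000'  # assumed dev host/port
HEADERS = {'Authorization': 'Token <paste-a-real-token-here>'}  # hypothetical token value

# List providers (ProviderList.get in views.py, 'Providers/' in urls.py)
print(requests.get(BASE + '/Providers/', headers=HEADERS).json())

# Create a provider; ProviderSerializer expects 'name' and 'number'
r = requests.post(BASE + '/Providers/',
                  json={'name': 'Acme', 'number': 5551234},
                  headers=HEADERS)
print(r.status_code, r.json())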
nghiemIUH/lpt
https://github.com/nghiemIUH/lpt
83000dd2ea53b091c80c22dcad23e5c09214970c
408a39273e341cd87bd1914872554fa5568e5ef0
846d232ace25bb20a2582dfca9d634aed6640382
refs/heads/main
2023-07-18T06:19:26.748820
2021-08-27T09:51:51
2021-08-27T09:51:51
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.719763994216919, "alphanum_fraction": 0.719763994216919, "avg_line_length": 32.20000076293945, "blob_id": "9d53432199331ade9686d168ec2340953e059812", "content_id": "b958f001918edb09df8f90f7e343c02988476fc3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 339, "license_type": "no_license", "max_line_length": 78, "num_lines": 10, "path": "/test.py", "repo_name": "nghiemIUH/lpt", "src_encoding": "UTF-8", "text": "from load_model import Model_Mnist, Model_Written\nimport tensorflow as tf\nmnist = Model_Mnist()\nwritten = Model_Written()\nmnist.load_model('model_mnist/model.ckpt.meta', 'model_mnist')\nwritten.load_model('model_hand_written/model.ckpt.meta', 'model_hand_written')\n\nt=tf.Graph()\nwith t.as_default():\n sess = tf.Session(graph = t)\n \n\n\n" }, { "alpha_fraction": 0.5696689486503601, "alphanum_fraction": 0.6058506369590759, "avg_line_length": 25.40816307067871, "blob_id": "b25a45f3fee68cd339447bc1fe93473487ee1fa4", "content_id": "433299fdc2bb68d021febb83183bc96daff33b41", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1299, "license_type": "no_license", "max_line_length": 78, "num_lines": 49, "path": "/detect.py", "repo_name": "nghiemIUH/lpt", "src_encoding": "UTF-8", "text": "from YOLO import BienSo\nimport cv2\nimport numpy as np\nfrom imutils import resize\nfrom load_model import Model_Mnist, Model_Written\n\nmnist = Model_Mnist()\nwritten = Model_Written()\n\nmnist.load_model('model_mnist/model.ckpt.meta', 'model_mnist')\nwritten.load_model('model_hand_written/model.ckpt.meta', 'model_hand_written')\n\n\nb = BienSo('yolov4-tiny.cfg', 'yolov4-tiny_3000.weights')\ndef image(img_path):\n img = cv2.imread(img_path)\n bien_so = b.cat_bien_so(img)\n list_tren, list_duoi = b.lay_gia_tri(img)\n result = ''\n for i in list_tren[:2]:\n i = i.reshape(1, 784)\n pred = mnist.predict(i)\n result += str(pred)\n result += '-'\n\n chu = list_tren[-2]\n chu = chu.reshape(1, 784)\n pred = written.predict(chu)\n result += chr(pred+65)\n\n so_cuoi = list_tren[-1]\n so_cuoi = so_cuoi.reshape(1, 784)\n pred = mnist.predict(so_cuoi)\n result += str(pred)\n result += '/'\n for i in list_duoi:\n i = i.reshape(1, 784)\n pred = mnist.predict(i)\n result += str(pred)\n\n cv2.putText(img, result, (int(bien_so[0]), int(\n bien_so[1]-10)), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 0, 0), 2)\n cv2.imshow('Bien so xe', img)\n cv2.waitKey()\n cv2.destroyAllWindows()\n\nif __name__ == \"__main__\":\n\n image('test1.jpg')\n \n" }, { "alpha_fraction": 0.46330517530441284, "alphanum_fraction": 0.49524813890457153, "avg_line_length": 33.436363220214844, "blob_id": "ec73fe926a9729094926d52e5173b9d34b066fc8", "content_id": "0325494c0fdfd341e84e98901bc3fb2ff51df9e6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3788, "license_type": "no_license", "max_line_length": 75, "num_lines": 110, "path": "/YOLO.py", "repo_name": "nghiemIUH/lpt", "src_encoding": "UTF-8", "text": "import cv2\nimport numpy as np\nimport imutils\nfrom math import sqrt\n\n\nclass BienSo:\n def __init__(self, config, weight):\n self.config = config\n self.weight = weight\n\n def get_anh(self):\n return cv2.imread(self.img)\n\n def get_output_layers(self, net):\n layer_names = net.getLayerNames()\n output_layers = [layer_names[i[0] - 1]\n for i in net.getUnconnectedOutLayers()]\n return output_layers\n \n def cat_bien_so(self, image):\n Width = 
image.shape[1]\n Height = image.shape[0]\n scale = 0.00392\n net = cv2.dnn.readNet(self.weight, self.config)\n blob = cv2.dnn.blobFromImage(\n image, scale, (416, 416), (0, 0, 0), True, crop=False)\n net.setInput(blob)\n outs = net.forward(self.get_output_layers(net))\n confidences = []\n boxes = []\n conf_threshold = 0.5\n nms_threshold = 0.4\n for out in outs:\n for detection in out:\n scores = detection[5:]\n class_id = np.argmax(scores)\n confidence = scores[class_id]\n if confidence > 0.5:\n center_x = int(detection[0] * Width)\n center_y = int(detection[1] * Height)\n w = int(detection[2] * Width)\n h = int(detection[3] * Height)\n x = center_x - w / 2\n y = center_y - h / 2\n confidences.append(float(confidence))\n boxes.append([x, y, w, h])\n\n if(len(boxes) == 0):\n return None\n indices = cv2.dnn.NMSBoxes(\n boxes, confidences, conf_threshold, nms_threshold)\n for i in indices:\n i = i[0]\n box = boxes[i]\n x = abs(box[0])\n y = abs(box[1])\n w = abs(box[2])\n h = abs(box[3])\n bien_so = image[round(y):round(y+h)+5, round(x):round(x+w)+5]\n return [x, y, bien_so]\n\n def tim_so(self, img):\n edged = cv2.Canny(img, 30, 200)\n contours, _ = cv2.findContours(\n edged, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n list_so = []\n if len(contours) > 0:\n i = 0\n for contour in contours:\n (x, y, w, h) = cv2.boundingRect(contour)\n aspect_ratio = float(w)/h\n if 0.2 < aspect_ratio < 0.5 and 0.5 < h/img.shape[0] < 0.8:\n so = img[y:y+h, x:x+w]\n so = np.pad(so, 20)\n so = cv2.GaussianBlur(so, (3, 3), 0)\n kernel = np.ones((3, 3), np.uint8)\n dilation = cv2.dilate(so, kernel=kernel)\n erosion = cv2.erode(dilation, kernel=np.ones(\n (5, 5), np.uint8), iterations=1)\n so = cv2.resize(erosion, (28, 28))\n list_so.append([x, so])\n\n list_so = sorted(list_so, key=lambda x: x[0])\n return [i[1] for i in list_so]\n\n def lay_gia_tri(self, img):\n bien_so = self.cat_bien_so(img)\n image = cv2.cvtColor(bien_so[2], cv2.COLOR_BGR2GRAY)\n\n thre = cv2.threshold(\n image, 127, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]\n thre = cv2.bitwise_not(thre)\n thre = imutils.resize(thre, width=400)\n\n kernel = np.ones((3, 3), np.uint8)\n\n dilation = cv2.dilate(thre, kernel=kernel, iterations=1)\n erosion = cv2.erode(dilation, kernel=kernel, iterations=1)\n\n blur = cv2.medianBlur(dilation, 5)\n\n h, w = blur.shape\n img_tren = blur[:h//2+10, :]\n img_duoi = blur[h//2-10:, :]\n\n list_tren = self.tim_so(img_tren)\n list_duoi = self.tim_so(img_duoi)\n\n return list_tren, list_duoi\n" }, { "alpha_fraction": 0.48177215456962585, "alphanum_fraction": 0.5172151923179626, "avg_line_length": 39.721649169921875, "blob_id": "168a3e31518f55056d819bcf05150d7ce4126727", "content_id": "c3e3c70c8f15e62851fd4bfb8fa92fc736cb9176", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3950, "license_type": "no_license", "max_line_length": 70, "num_lines": 97, "path": "/load_model.py", "repo_name": "nghiemIUH/lpt", "src_encoding": "UTF-8", "text": "import tensorflow as tf\nimport numpy as np\n\n\nclass Model_Mnist:\n def __init__(self):\n self.graph_mnist = tf.Graph()\n\n def load_model(self, meta_file, folder_name):\n with self.graph_mnist.as_default():\n self.sess_mnist = tf.Session(graph=self.graph_mnist)\n saver = tf.compat.v1.train.import_meta_graph(meta_file)\n saver.restore(self.sess_mnist,\n tf.train.latest_checkpoint(folder_name))\n graph_1 = tf.compat.v1.get_default_graph()\n\n self.weights = {\n 'w1': graph_1.get_tensor_by_name(\"w1:0\"),\n 'w2': 
graph_1.get_tensor_by_name(\"w2:0\"),\n 'w3': graph_1.get_tensor_by_name(\"w3:0\"),\n 'w4': graph_1.get_tensor_by_name(\"w4:0\"),\n \n }\n self.biases = {\n 'b1': graph_1.get_tensor_by_name(\"b1:0\"),\n 'b2': graph_1.get_tensor_by_name(\"b2:0\"),\n 'b3': graph_1.get_tensor_by_name(\"b3:0\"),\n 'b4': graph_1.get_tensor_by_name(\"b4:0\"),\n \n }\n\n def net(self, x, weights, biases):\n dense_1 = tf.matmul(x, weights['w1'])+biases['b1']\n relu_1 = tf.nn.relu(dense_1)\n dense_2 = tf.matmul(relu_1, weights['w2'])+biases['b2']\n relu_2 = tf.nn.relu(dense_2)\n dense_3 = tf.matmul(relu_2, weights['w3'])+biases['b3']\n relu_3 = tf.nn.relu(dense_3)\n dense_4 = tf.matmul(relu_3, weights['w4'])+biases['b4']\n return dense_4\n\n def predict(self, x):\n with self.sess_mnist.as_default():\n with self.graph_mnist.as_default():\n X = tf.placeholder(tf.float32, shape=(None, 784))\n pred = self.net(X, self.weights, self.biases)\n result = self.sess_mnist.run(pred, feed_dict={X: x})\n return np.argmax(result[0])\n\n\nclass Model_Written:\n def __init__(self):\n\n self.graph_written = tf.Graph()\n\n def load_model(self, meta_file, folder_name):\n with self.graph_written.as_default():\n self.sess_written = tf.Session(graph=self.graph_written)\n saver = tf.compat.v1.train.import_meta_graph(meta_file)\n saver.restore(self.sess_written,\n tf.train.latest_checkpoint(folder_name))\n graph_2 = tf.compat.v1.get_default_graph()\n\n self.weights = {\n 'w1': graph_2.get_tensor_by_name(\"W1:0\"),\n 'w2': graph_2.get_tensor_by_name(\"W2:0\"),\n 'w3': graph_2.get_tensor_by_name(\"W3:0\"),\n 'w4': graph_2.get_tensor_by_name(\"W4:0\"),\n 'w5': graph_2.get_tensor_by_name(\"W5:0\"),\n }\n self.biases = {\n 'b1': graph_2.get_tensor_by_name(\"B1:0\"),\n 'b2': graph_2.get_tensor_by_name(\"B2:0\"),\n 'b3': graph_2.get_tensor_by_name(\"B3:0\"),\n 'b4': graph_2.get_tensor_by_name(\"B4:0\"),\n 'b5': graph_2.get_tensor_by_name(\"B5:0\"),\n }\n\n def net(self, x, weights, biases):\n dense_1 = tf.matmul(x, weights['w1']) + biases['b1']\n relu_1 = tf.nn.relu(dense_1)\n dense_2 = tf.matmul(relu_1, weights['w2'])+biases['b2']\n relu_2 = tf.nn.relu(dense_2)\n dense_3 = tf.matmul(relu_2, weights['w3'])+biases['b3']\n relu_3 = tf.nn.relu(dense_3)\n dense_4 = tf.matmul(relu_3, weights['w4'])+biases['b4']\n relu_4 = tf.nn.relu(dense_4)\n dense_5 = tf.matmul(relu_4, weights['w5'])+biases['b5']\n return tf.nn.softmax(dense_5)\n\n def predict(self, x):\n with self.sess_written.as_default():\n with self.graph_written.as_default():\n X = tf.placeholder(tf.float32, shape=(None, 784))\n pred = self.net(X, self.weights, self.biases)\n result = self.sess_written.run(pred, feed_dict={X: x})\n return np.argmax(result[0])\n" } ]
4
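In the lpt entry above, BienSo.tim_so returns each segmented character as a 28x28 grayscale array, and detect.py flattens it to shape (1, 784) before calling the classifiers, matching their tf.placeholder(tf.float32, shape=(None, 784)) inputs. A minimal sketch of that handoff, using a dummy array in place of a real crop (the char_img variable here is hypothetical):

import numpy as np

char_img = np.zeros((28, 28), dtype=np.uint8)   # stand-in for one crop from BienSo.tim_so
x = char_img.reshape(1, 784).astype(np.float32)  # one flat 784-vector per sample
print(x.shape)  # (1, 784), the shape Model_Mnist.predict / Model_Written.predict expect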
jakbellamy/Scrape_Tweeter
https://github.com/jakbellamy/Scrape_Tweeter
16db3d26299cc335693b23dccacecbda563095bc
4b218a71c010a364066f5b1ba7bc4f95a71aab4f
78804bef6bdac069e3ef43b537a961b93f974dde
refs/heads/main
2022-12-25T19:36:41.873162
2020-10-03T05:30:05
2020-10-03T05:30:05
300,796,170
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.48659002780914307, "alphanum_fraction": 0.49395814538002014, "avg_line_length": 36.09574508666992, "blob_id": "d67e721f40a53b24022d0a611c83eee14c885aef", "content_id": "06b02dfd7965cc18909359e5d8f0649ac38fba7f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3393, "license_type": "no_license", "max_line_length": 156, "num_lines": 94, "path": "/src/Yeeter.py", "repo_name": "jakbellamy/Scrape_Tweeter", "src_encoding": "UTF-8", "text": "from selenium import webdriver\nimport pandas as pd\nimport time\nimport json\n\nclass Yeeter:\n def __init__(self, config_path):\n self.config = self.read_config(config_path)\n self.tweets = []\n\n def scrape_user(self, user, num_tweets, until_point='', alt_date=None):\n user = user.lower()\n url = f\"https://twitter.com/search?q=from%3A{user}{until_point}&src=typed_query&f=live\"\n\n driver = webdriver.Chrome(self.config['selenium-driver'])\n driver.get(url) if not alt_date else driver.get(\n f\"https://twitter.com/search?q=from%3A{user}{self.until(alt_date)}&src=typed_query&f=live\"\n )\n\n time.sleep(0.3)\n\n no_load_top, no_load_bottom = False, False\n while len(self.tweets) <= num_tweets and not no_load_top:\n counter = 0\n while counter <= 2000 and not no_load_bottom:\n try:\n driver.execute_script(\"\"\"\n window.scrollTo(0, document.body.scrollHeight);\n \"\"\")\n time.sleep(0.5)\n tweet_chunk = driver.execute_script(\"\"\"\n chunk = []\n\n let feed = Array.prototype.slice.call(document.querySelector(\"[aria-label='Timeline: Search timeline']\").firstElementChild.children)\n for(tweet of feed){\n content = tweet.querySelector(\"[lang='en']\")\n date = tweet.querySelector('Time')\n if(content){\n chunk.push([date.dateTime, content.innerText])\n }\n }\n return chunk\n \"\"\")\n self.tweets += [tweet for tweet in tweet_chunk if not tweet in self.tweets and self.is_clean(tweet[1])]\n counter += len(tweet_chunk)\n time.sleep(0.15)\n except:\n print('Processing Ended: Broke Child Loop')\n no_load_bottom = True\n break\n print(len(self.tweets))\n try:\n driver.quit()\n time.sleep(0.7)\n new_date = self.tweets[-1][0].split('T')[0]\n if new_date == alt_date:\n no_load_top = True\n break\n else:\n self.scrape_user(user, num_tweets, alt_date=new_date)\n except:\n print('Processing Ended: Broke Parent Loop')\n no_load_top = True\n break\n driver.quit()\n\n def to_df(self):\n return pd.DataFrame(self.tweets, columns=['Date', 'Tweet'])\n\n def to_csv(self, title, clear_tweets=False):\n df = self.to_df()\n df.to_csv(f'./{title}.csv', index=False, encoding='utf-8-sig')\n if clear_tweets:\n self.tweets = []\n\n @staticmethod\n def read_config(config_path):\n with open(config_path, 'r') as file:\n config = json.load(file)\n return config\n\n @staticmethod\n def until(until_point):\n if until_point:\n return '%20until%3A' + until_point\n else:\n return ''\n\n @staticmethod\n def is_clean(tweet):\n return '\\n' not in tweet\n" }, { "alpha_fraction": 0.699999988079071, "alphanum_fraction": 0.7307692170143127, "avg_line_length": 17.428571701049805, "blob_id": "44f9a8cc6519b3f44de523ee2c9713c37adad4ac", "content_id": "a41a461dcd09734ab6bdde622c2bda9fa34bdcd3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 130, "license_type": "no_license", "max_line_length": 33, "num_lines": 7, "path": "/run.py", "repo_name": "jakbellamy/Scrape_Tweeter", "src_encoding": "UTF-8", "text": "from 
src.Yeeter import Yeeter\n\n\n\ntwitter = Yeeter('yeeter.json')\ntwitter.scrape_user('dril', 1500)\ntwitter.to_csv('dril_tweets')\n\n" } ]
2
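Yeeter.to_csv in the entry above writes a two-column CSV ('Date', 'Tweet') with utf-8-sig encoding, and run.py names the output 'dril_tweets'. A small sketch of loading that file back for analysis, assuming dril_tweets.csv exists in the working directory after run.py has finished:

import pandas as pd

# read the file written by Yeeter.to_csv; 'Date' holds ISO timestamps from the tweet feed
df = pd.read_csv('dril_tweets.csv', encoding='utf-8-sig', parse_dates=['Date'])
print(df.shape)
print(df.head())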
hemanzur/Behavior_analysis
https://github.com/hemanzur/Behavior_analysis
b84f6e5263163853ae781c624d07cbfd4a2dd594
cc9b119d18667e4189de4abb4bc6325693aa18d1
a5c299c394125de320740e9e6e510bfd478f9b83
refs/heads/master
2020-03-29T19:26:10.141784
2013-11-19T23:01:19
2013-11-19T23:01:19
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.43530037999153137, "alphanum_fraction": 0.4519432783126831, "avg_line_length": 37.61846923828125, "blob_id": "fe7a720c9fd9df818e12d00ffac0e917beeca327", "content_id": "2e3f0b9655a5c58c3097f463eac2df098501ac8e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 41399, "license_type": "no_license", "max_line_length": 120, "num_lines": 1072, "path": "/m_bhvfuncs.py", "repo_name": "hemanzur/Behavior_analysis", "src_encoding": "UTF-8", "text": "#bhvfuncs.py\n\nimport scipy.io as spio\nimport numpy as np\nimport os, re, string\nfrom glob import glob\nfrom PyQt4 import QtGui\nimport ipdb\n\n########################################################################################################################\n\ndef mpc2beh(filesdir = '', savedir = '', overwrite = False, calcParams = True):\n\n '''Routine to transform the MPC files into matlab files\\n\n Input:\n *filesdir = can be a directory or a filename\n *savedir = must be a directory\n *overwrite = whether to overwrite the existing file or not\n Output: data.'''\n\n if not filesdir:\n filesdir = str(QtGui.QFileDialog.getExistingDirectory(caption = 'MPC Files Dir'))\n if not filesdir: return\n\n if not savedir or not os.path.isdir(savedir):\n savedir = str(QtGui.QFileDialog.getExistingDirectory(caption = 'Save dir'))\n if not savedir: return\n\n if os.path.isdir(filesdir):\n files = glob(os.path.join(filesdir, '!*.Subject*'))\n\n if files: files.sort()\n else:\n print 'There were no files in here!'\n return\n\n for j,k in enumerate(files):\n filesdir, files[j] = os.path.split(k)\n\n elif os.path.isfile(filesdir):\n if not re.search('!.*.Subject .*', filesdir):\n raise SystemExit('It seems that this is not an MPC file!')\n filesdir, files = os.path.split(filesdir)\n files = [files]\n\n pd = QtGui.QProgressDialog('Running MPC to beh ...', 'Cancel', 0, len(files))\n pd.setWindowTitle('Converting Med PC files to beh ...')\n pd.setGeometry(500, 500, 500, 100)\n pd.show()\n\n for k in files:\n\n YY = k[3:5]\n MM = k[6:8]\n DD = k[9:11]\n HH = k[12:14]\n MI = k[15:17]\n RAT = k[k.find('Subject')+8:]\n\n filename = '%s_%s%s%s_%s%s*.beh' % (RAT,YY,MM,DD,HH,MI)\n\n # update the progress bar and also check if the conversion was canceled\n pd.setLabelText('Processing... 
' + k)\n pd.setValue(pd.value()+1)\n QtGui.QApplication.processEvents()\n if pd.wasCanceled():\n return\n\n if glob(os.path.join(savedir,filename)) != [] \\\n and overwrite == 0 \\\n or not os.path.isfile(os.path.join(filesdir,k)):\n continue\n\n fid = open(os.path.join(filesdir, k), 'rU')\n w = fid.readlines()\n fid.close()\n\n Data = {}\n\n for m, n in enumerate(w):\n if n == '\\n': continue\n\n st = n.split(': ')\n st[0] = st[0].replace(' ','')\n st[1] = st[1].replace('\\n','')\n\n if st[0] in ['File','Subject','MSN']:\n Data[st[0]] = str(st[1].strip())\n elif st[0].find('Date')!=-1 or st[0].find('Time')!=-1:\n Data[st[0]] = [int(x) for x in re.split('[:/]',st[1].strip())]\n elif st[0] in ['Box','Experiment','Group']:\n if len(st[1].strip()) > 0:\n Data[st[0]] = int(st[1].strip())\n else:\n Data[st[0]] = 0\n\n if st[0] == 'MSN': break\n\n for l,n in enumerate(w[m+1:]):\n if n.find('Start') != -1 or n == '\\n':\n break\n e = n.strip().replace('\\n','')\n e = e.split(':')\n\n if e[0] in string.uppercase:\n curvar = e[0]\n Data[curvar] = []\n continue\n \n elif e[0].find('\\\\') != -1:\n Data['Comments'] = e[0]\n continue\n \n else:\n e = e[1].strip()\n e = re.split(' +', e)\n e = [float(x) for x in e]\n if curvar == 'X' and e == [0,0,0,0,0]:\n continue\n else:\n Data[curvar] = Data[curvar] + e\n\n Data['X'] = np.array(Data['X'])\n if Data['X'].size < 10: continue\n Data['X'] = Data['X'][ np.nonzero(Data['X']) ]\n ECodes = np.floor( Data['X']/1000000 )\n Data['X'] = Data['X'] - ECodes*1000000\n X = np.unique(ECodes)\n X.sort()\n Data['X'] = Data['X'] - Data['X'][0]\n\n #create a np.object to be saved as a cell array in matlab\n Data['EventTS'] = []\n Data['EventCode'] = X\n Data['EventName'] = []\n\n TableEvent = BehMapping(GetMapping(Data))\n TableEventName = TableEvent[:,0]\n TableEventCode = TableEvent[:,1]\n\n for l in X:\n indx = np.flatnonzero(ECodes == l)\n if np.any(indx):\n Data['EventTS'].append(Data['X'][ECodes == l])\n Data['EventName'].append(TableEventName[l == TableEventCode][0])\n\n Data['EventTS'] = np.array(Data['EventTS'] , dtype = np.object, ndmin=1)\n Data['EventCode'] = np.array(Data['EventCode'], dtype = np.object, ndmin=1)\n Data['EventName'] = np.array(Data['EventName'], dtype = np.object, ndmin=1)\n \n if calcParams:\n Data = GetBhvParams(Data)\n \n #eliminate the variables coming from MedPC\n Data.pop('X')\n\n savefile = os.path.join(savedir, filename.replace('*', '_' + Data['MSN']))\n\n if os.path.isfile(savefile):\n os.remove(savefile)\n\n spio.savemat(savefile, {'Data':Data}, format = '5',\n appendmat=False, oned_as='row')\n\n########################################################################################################################\n\ndef GetMapping(Data):\n\n if re.search('(CWHT|CENTWHT|[0-9]{1,2}K)[R,L]_[0-9]{1,2}K[R,L]|[R,L]Sip', string.upper(Data['MSN'])) or \\\n re.search('UNBIAS|DRIFT|LEFT|DU2|LITI|REPCATCH|2T|NOISE|CLICK', string.upper(Data['MSN'])) or \\\n np.any(Data['EventCode'] > 50):\n return 2\n else:\n return 1\n\n########################################################################################################################\n\ndef BehMapping(Mapping):\n '''\n This function returns a table of names and codes, depending on the mapping\n Mapping is an integer that can be 1 or 2\n '''\n\n TableEvent1 = np.array([['SessionStart', 17],\n ['IRLickOn', 21],\n ['IRLickOff', 22],\n ['IRCTROn', 23],\n ['IRCTROff', 24],\n ['IRRTOn', 25],\n ['IRRTOff', 26],\n ['IRLTOn', 27],\n ['IRLTOff', 28],\n ['OUT1HL', 31],\n ['OUT2SL', 32],\n 
['OUT3CTRRED', 33],\n ['OUT4CTRWHTOn', 34],\n ['OUT5RTRED', 35],\n ['OUT6RTGRN', 36],\n ['OUT7RTYLW', 37],\n ['OUT8EMPTY', 38],\n ['OUT9LTRED', 39],\n ['OUT10LTGRN', 40],\n ['OUT11LTYLW', 41],\n ['OUT12NOISE', 42],\n ['OUT13SOL1', 43],\n ['OUT14SOL2', 44],\n ['OUT15SOL3', 45],\n ['OUT16SOL4',46],\n ['SOUND1', 47],\n ['SOUND2', 48],\n ['SOUND3', 49],\n ['OUT4CTRWHTOff',50]], dtype = np.object)\n\n TableEvent2=np.array([['SessionStart',17],\n ['RightLickOn',21],\n ['RightlickOff',22],\n ['RightPokeOn',23],\n ['RightPokeOff',24],\n ['CentPokeOn',25],\n ['CentPokeOff',26],\n ['LeftLickOn',27],\n ['LeftLickOff',28],\n ['LeftPokeOn',29],\n ['LeftPokeOff',30],\n ['HouseLightOn',31],\n ['HouseLightOff',32],\n ['RightSipperLightOn',33],\n ['RightSipperLightOff',34],\n ['RedFrontLightOn',35],\n ['RedFrontLightOff',36],\n ['WhiteFrontLightOn',37],\n ['WhiteFrontLightOff',38],\n ['NPRedLightOn',39],\n ['NPRedLightOff',40],\n ['NPGreenLightOn',41],\n ['NPGreenLightOff',42],\n ['NPYellowLightOn',43],\n ['NPYellowLightOff',44],\n ['LeftSipperLightOn',45],\n ['LeftSipperLightOff',46],\n ['NoiseOn',47],\n ['NoiseOff',48],\n ['Solnd1',49],\n ['Solnd2',50],\n ['Solnd3',51],\n ['Solnd4',52],\n ['Sound1',53],\n ['Sound2',54],\n ['Catch',55]],dtype=np.object)\n\n if Mapping == 1:\n return TableEvent1\n elif Mapping == 2:\n return TableEvent2\n\n########################################################################################################################\n\ndef GetBhvParams(Data):\n '''Get behavioral parameters\\n\n Input: behavioral Data dictionary or file\\n\n Output: behavioral data dict with all behavioral parameters calculated'''\n\n try:\n if os.path.isfile(Data):\n Data = LoadBehFile(Data)\n except TypeError:\n if type(Data)!=dict:\n raise NameError('Data is neither file nor dictionary !')\n\n #===========================================================================================================#\n\n if GetMapping(Data) == 1:\n\n EvtName1=['SOUND1','SOUND2','IRRTOn','IRRTOff','IRCTROn','OUT4CTRWHTOn','OUT13SOL1','IRLickOn']\n EvtName2=['Tone1', 'Tone2', 'NpIn', 'NpOut', 'RpIn', 'CLight', 'Solnd1', 'Lick']\n Vars={}\n for k,l in zip(EvtName1,EvtName2):\n indx = np.flatnonzero(Data['EventName']==k)\n if indx:\n Vars[l] = Data['EventTS'][indx][0]\n\n Stims=['Tone1','Tone2','CLight']\n\n for j,k in enumerate(Stims):\n if k in Vars.keys() and 'Lick' in Vars.keys():\n\n CurStim = 'Stim' + str(j)\n Data[CurStim] = {}\n Stim = Vars[k]\n # First find the hits and the misses\n # Then find the closest event to each one of the Hits\n HitsParams = GetHits(Stim, Vars['Lick'])\n RTT = HitsParams['ThirdLickHitTS'] - HitsParams['StimHitsTS']\n RT4, _ = DistXY(HitsParams['StimHitsTS'], Vars['Lick'])\n RT4 = RTT - RT4\n\n if 'NpIn' in Vars.keys():\n RT0, RT0Indx = DistYX( HitsParams['StimHitsTS'], Vars['NpIn'] )\n Data[CurStim]['RT0'] = RT0\n\n if 'NpOut' in Vars.keys():\n RT1, RT1Indx = DistXY( HitsParams['StimHitsTS'], Vars['NpOut'] )\n Data[CurStim]['RT1'] = RT1\n\n if ('RpIn' in Vars.keys()) and ('NpOut' in Vars.keys()):\n RT2, RT2Indx = DistXY(Vars['NpOut'][RT1Indx], Vars['RpIn'])\n RT3, RT3Indx = DistXY(Vars['RpIn'][RT2Indx], Vars['Lick'])\n Data[CurStim]['RT2'] = RT2\n Data[CurStim]['RT3'] = RT3\n Data[CurStim]['RT4'] = RT4\n\n Data[CurStim]['Descr'] = str(k)\n Data[CurStim]['HitsTS'] = HitsParams['StimHitsTS']\n Data[CurStim]['HitsIndx'] = HitsParams['StimHitsIndx']\n Data[CurStim]['MissTS'] = HitsParams['StimMissTS']\n Data[CurStim]['MissIndx'] = HitsParams['StimMissIndx']\n Data[CurStim]['RTT'] = 
RTT\n Data[CurStim]['StimTS'] = Stim\n Data[CurStim]['RT4'] = HitsParams['ThirdLickHitTS'] - HitsParams['FirstLickHitTS']\n\n #Copy all the time stamps of the events to make easier calculations for rasters\n for x in ['NpIn', 'NpOut', 'RpIn', 'Solnd1', 'Lick']:\n if Vars.has_key(x):\n Data[CurStim][x] = Vars[x]\n if Vars.has_key('Solnd1'):\n Data[CurStim]['Solnd'] = Vars['Solnd1']\n\n #===========================================================================================================#\n\n elif GetMapping(Data) == 2:\n\n # First get the table mapping\n TableEvent = BehMapping( GetMapping(Data) )\n \n # create a table to remap names and codes\n EvtNames = np.array([['NPLed', 43],\n ['RSipLight', 33],\n ['LSipLight', 45],\n ['WhtFLight', 37],\n ['RedFLight', 35],\n ['Noise', 47],\n ['Tone1', 53],\n ['Tone2', 54],\n ['Catch', 55],\n ['NpOut', 26],\n ['NpIn', 25],\n ['RRpIn', 23],\n ['LRpIn', 29],\n ['RLick', 21],\n ['LLick', 27],\n ['Solnd1', 49],\n ['Solnd2', 50],\n ['Solnd3', 51],\n ['Solnd4', 52]], dtype = np.object)\n\n # get the variables present in the data structure into a dictionary\n # to make its manipulation easier\n Vars = {}\n for name, code in EvtNames:\n indx = np.flatnonzero(TableEvent[:,1] == code)\n if TableEvent[indx,0] in Data['EventName']:\n Vars[name] = Data['EventTS'][ Data['EventName'] == TableEvent[indx,0] ][0]\n\n #Check the association between stimuli and reward\n #If data has the 'S' field it builds an array of Stim - Resp association\n if Data.has_key('S'):\n \n # fill the S array and reshape it\n S = np.array(Data['S'])\n if S.size%10 == 0:\n S = S.reshape([S.size/10, 10])\n else:\n n = int(np.ceil(S.size/10.0)*10)\n S = np.concatenate([S, np.zeros([n-S.size])]).reshape([n/10,10])\n \n Data['S'] = S\n \n # iterate over the columns that contain stimuli -resp associations:\n # element 1 contains the association: 1-->right, 2-->left, 3-->miss (catch)\n # The idea is to create a table with the following columns:\n # stim-resp association; stim name, stim code, correct lick, incorrect lick\n StimResp = []\n for k in np.flatnonzero(S[0,:]):\n # stim -response association\n if S[:,k][0] == 1:\n StimResp.append([1, EvtNames[EvtNames[:,1] == S[:,k][1], 0][0],\n S[:,k][1], 'RLick', 'LLick'])\n \n elif S[:,k][0] == 2:\n StimResp.append([2, EvtNames[EvtNames[:,1] == S[:,k][1], 0][0],\n S[:,k][1], 'LLick', 'RLick'])\n \n elif S[:,k][0] == 3:\n StimResp.append([3, EvtNames[EvtNames[:,1] == S[:,k][1], 0][0],\n S[:,k][1], 'Miss', 'Miss'])\n\n #If not, create one by default\n else:\n StimResp = [[1, 'Tone1', 53, 'RLick', 'LLick'],\n [2, 'Tone2', 54, 'LLick', 'RLick'],\n [3, 'Catch', 55, 'Miss' , 'Miss' ]]\n\n #main loop that extracts and calculates all the parameters for a given stimuli\n for j, k in enumerate(StimResp):\n\n # set current stimulus name\n CurStim = 'Stim' + str(j)\n\n # create an empty dictionary to hold the data\n Data[CurStim] = {}\n\n # check whether the StimTS and the lickTS are present and that this\n # is not a catch stimuli\n if k[1] in Vars and k[3] in Vars and k[0] != 3:\n \n # only add the stimuli that were presented after the first yellow LED in the nose poke\n if 'NPLed' in Vars:\n StimTS = Vars[ k[1] ][ Vars[ k[1] ] > Vars['NPLed'][0] ]\n else:\n StimTS = Vars[k[1]]\n \n ### ADD ONLY THE VALID STIMULI --> THOSE THAT HAVE AT LEAST\n ### 3 SECONDS FROM THE LAST TIMESTAMP\n if (Data['X'][-1] - StimTS[-1]) < 3.0:\n StimTS = StimTS[0:-1]\n \n hLick = Vars[ k[3] ]\n\n # First find the hits\n HitsParams = GetHits(StimTS, hLick)\n\n # check whether are 
there any incorrects in the vars dictionary\n if k[4] in Vars:\n\n # get the parameters for the incorrects\n eLick = Vars[ k[4] ]\n IncParams = GetHits(StimTS, eLick)\n\n # for the case of no errors\n if HitsParams['StimHitsTS'].size > 0 and IncParams['StimHitsIndx'].size == 0:\n HitsTS = HitsParams['StimHitsTS']\n HitsIndx = HitsParams['StimHitsIndx']\n MissTS = HitsParams['StimMissTS']\n MissIndx = HitsParams['StimMissIndx']\n ErrTS = np.array([])\n ErrIndx = np.array([])\n\n # for the case of Hits and Errors\n elif HitsParams['StimHitsTS'].size > 0 and IncParams['StimHitsIndx'].size > 0:\n\n # Look for the Stim indices that are shared by Hits and Errors\n indx = np.intersect1d(HitsParams['StimHitsIndx'],\n IncParams ['StimHitsIndx'])\n\n # When there is intersection between hits and errors\n # this can happen if the animal licks in one side and then the other\n # obtain the true hits and the true errors\n if indx.size > 0:\n\n # Get the total reaction time for hits and errors\n hRTT = HitsParams['ThirdLickHitTS'] - HitsParams['StimHitsTS']\n eRTT = IncParams ['ThirdLickHitTS'] - IncParams ['StimHitsTS']\n\n # check the reaction time for each intersection case and\n # get the indices of the minimum\n hIndx = np.searchsorted(HitsParams['StimHitsIndx'], indx)\n eIndx = np.searchsorted(IncParams ['StimHitsIndx'], indx)\n minIndx = np.flatnonzero(np.argmin([ hRTT[hIndx], eRTT[eIndx] ], axis = 0))\n\n # eliminate those indices that are shared\n HitsParams['StimHitsIndx'] = np.delete( HitsParams['StimHitsIndx'], indx[minIndx])\n IncParams['StimHitsIndx'] = np.delete( IncParams['StimHitsIndx'], indx[minIndx] == False)\n\n # Get the stimulus timestamps again\n HitsParams['StimHitsTS'] = StimTS[HitsParams['StimHitsIndx']]\n IncParams['StimHitsTS'] = StimTS[IncParams['StimHitsIndx']]\n\n # With the true hit indices recalculate all the hits and errors\n HitsParams = GetHits(HitsParams['StimHitsTS'], hLick)\n IncParams = GetHits(IncParams ['StimHitsTS'], eLick)\n\n # now get the misses\n MissIndx = np.arange(StimTS.size)\n tmp = np.concatenate([HitsParams['StimHitsIndx'],\n IncParams['StimHitsIndx']])\n MissIndx = np.delete(MissIndx, tmp)\n MissTS = StimTS[MissIndx]\n\n # ... 
and the rest of the parameters\n HitsTS = HitsParams['StimHitsTS']\n HitsIndx = HitsParams['StimHitsIndx']\n ErrTS = IncParams['StimHitsTS']\n ErrIndx = IncParams['StimHitsIndx']\n\n # for the case of no hits and errors > 0\n elif HitsParams['StimHitsTS'].size == 0 and IncParams ['StimHitsIndx'].size > 0:\n\n # get the incorrects\n ErrTS = IncParams['StimHitsTS']\n ErrIndx = IncParams['StimHitsIndx']\n\n # Calculate the true Misses\n MissIndx = np.delete(range(len(StimTS)), IncParams['StimHitsIndx'])\n MissTS = StimTS[MissIndx]\n\n # the hits are simply empty arrays\n HitsTS = np.array([])\n HitsIndx = np.array([])\n\n # for the case of no Hits and no errors\n elif HitsParams['StimHitsTS'].size == 0 and IncParams ['StimHitsIndx'].size == 0:\n HitsTS = np.array([])\n HitsIndx = np.array([])\n ErrTS = np.array([])\n ErrIndx = np.array([])\n MissTS = StimTS\n MissIndx = np.arange(StimTS.size)\n\n # in case there are no incorrects in the Vars dictionary\n else:\n HitsTS = HitsParams['StimHitsTS']\n HitsIndx = HitsParams['StimHitsIndx']\n MissTS = HitsParams['StimMissTS']\n MissIndx = HitsParams['StimMissIndx']\n ErrTS = np.array([])\n ErrIndx = np.array([])\n\n # fill the data structure with the information we have calculated\n Data[CurStim]['Descr'] = str(k[1])\n Data[CurStim]['HitsTS'] = HitsTS\n Data[CurStim]['HitsIndx'] = HitsIndx\n Data[CurStim]['ErrTS'] = ErrTS\n Data[CurStim]['ErrIndx'] = ErrIndx\n Data[CurStim]['MissTS'] = MissTS\n Data[CurStim]['MissIndx'] = MissIndx\n Data[CurStim]['StimTS'] = StimTS\n \n # add nose poke information\n if Vars.has_key('NpIn'): Data[CurStim]['NpIn'] = Vars['NpIn']\n if Vars.has_key('NpOut'): Data[CurStim]['NpOut'] = Vars['NpOut']\n if k[3]=='LLick': RpIn = Vars['LRpIn']\n elif k[3]=='RLick': RpIn = Vars['RRpIn']\n Data[CurStim]['RpIn'] = RpIn\n\n # add the lick information\n if k[3] in Vars.keys():\n Lick = Vars[k[3]]\n Data[CurStim]['Lick'] = Vars[k[3]]\n \n # add the appropriate solenoid timestamps\n if k[0] == 1:\n if 'Solnd1' in Vars and Vars['Solnd1'].size > 0:\n Data[CurStim]['Solnd'] = Vars['Solnd1']\n elif 'Solnd2' in Vars and Vars['Solnd2'].size > 0:\n Data[CurStim]['Solnd'] = Vars['Solnd2']\n elif k[0] == 2:\n if 'Solnd3' in Vars and Vars['Solnd3'].size > 0:\n Data[CurStim]['Solnd'] = Vars['Solnd3']\n elif 'Solnd4' in Vars and Vars['Solnd4'].size > 0:\n Data[CurStim]['Solnd'] = Vars['Solnd4']\n\n # Calculate Reaction Times if are there any hits\n if HitsTS.size > 0:\n\n # get the total reaction time and add it to the data structure\n RTT = HitsParams['ThirdLickHitTS'] - HitsParams['StimHitsTS']\n Data[CurStim]['RTT'] = RTT\n\n # check the presence of the nosepoke variables as well as\n # that the training protocol is a NosePoke task\n if Vars.has_key('NpIn') and Vars.has_key('NpOut') and \\\n re.search('NP(?=0[0-9]A?)[0]', Data['MSN']):\n\n # calculate the foreperiod\n RT0, _, _ = SparseDistance(HitsTS, Vars['NpIn'], direction = 'yx')\n\n #Calculate RT1\n RT1, RT1x, RT1y = SparseDistance(HitsTS, Vars['NpOut'], direction = 'xy')\n #pdb.set_trace()\n #Calculate RT2 (from NP exit to Resp Port In)\n RT2, RT2x, RT2y = SparseDistance(Vars['NpOut'][RT1y], RpIn, direction = 'xy')\n\n #Calculate RT3 (from Resp Port In to First Lick)\n RT3, RT3x, RT3y = SparseDistance(RpIn[RT2y], Lick, direction = 'xy')\n\n # RT4 is the time from the first lick to the third lick\n RT4 = HitsParams['ThirdLickHitTS'] - HitsParams['FirstLickHitTS']\n\n else:\n RT0 = np.array([])\n RT1 = np.array([])\n RT2 = np.array([])\n RT3 = np.array([])\n RT4 = np.array([])\n\n 
else:\n RT0 = np.array([])\n RT1 = np.array([])\n RT2 = np.array([])\n RT3 = np.array([])\n RT4 = np.array([])\n RTT = np.array([])\n\n Data[CurStim]['RT0'] = RT0\n Data[CurStim]['RT1'] = RT1\n Data[CurStim]['RT2'] = RT2\n Data[CurStim]['RT3'] = RT3\n Data[CurStim]['RT4'] = RT4\n Data[CurStim]['RTT'] = RTT\n\n # Calculate the parameters for the catch trials\n elif k[0] == 3 and k[1] in Vars:\n\n # only add the stimuli that were presented after the first\n # yellow LED in the nose poke\n if 'NPLed' in Vars:\n StimTS = Vars[ k[1] ][ Vars[ k[1] ] > Vars['NPLed'][0] ]\n else:\n StimTS = Vars[k[1]]\n \n # eliminate those that are not valid\n if (Data['X'][-1] - StimTS[-1]) < 3.0:\n StimTS = StimTS[0:-1]\n\n if Vars.has_key('LLick') and Vars.has_key('RLick'):\n Lick = np.concatenate((Vars['LLick'], Vars['RLick']))\n elif 'LLick' not in Vars.keys():\n Lick = Vars['RLick']\n elif 'RLick' not in Vars.keys():\n Lick = Vars['LLick']\n\n Lick.sort()\n\n # get the hits for the catch trials\n CatchParams = GetHits(StimTS, Lick)\n\n Data[CurStim]['Descr'] = 'Catch'\n Data[CurStim]['HitsTS'] = CatchParams['StimHitsTS']\n Data[CurStim]['HitsIndx'] = CatchParams['StimHitsIndx']\n Data[CurStim]['MissTS'] = CatchParams['StimMissTS']\n Data[CurStim]['MissIndx'] = CatchParams['StimMissIndx']\n Data[CurStim]['StimTS'] = StimTS\n Data[CurStim]['Lick'] = Lick\n\n if Vars.has_key('LRpIn') and Vars.has_key('RRpIn'):\n RpIn = np.concatenate([Vars['LRpIn'], Vars['RRpIn']])\n elif Vars.has_key('LRpIn') and not Vars.has_key('RRpIn'):\n RpIn = Vars['RRpIn']\n elif Vars.has_key('RRpIn') and not Vars.has_key('LRpIn'):\n RpIn = Vars['LRpIn']\n\n RpIn.sort()\n\n Data[CurStim]['RpIn'] = RpIn\n\n if Vars.has_key('NpIn'): Data[CurStim]['NpIn'] = Vars['NpIn']\n if Vars.has_key('NpOut'): Data[CurStim]['NpOut'] = Vars['NpOut']\n\n #pdb.set_trace()\n # now calculate the catch trial reaction times\n if CatchParams['StimHitsIndx'].size > 0:\n\n # first get the total reaction time\n RTT = CatchParams['ThirdLickHitTS'] - CatchParams['StimHitsTS']\n Data[CurStim]['RTT'] = RTT\n\n # get the foreperiods\n if 'NpIn' in Vars.keys():\n RT0, RT0x, RT0y = SparseDistance(CatchParams['StimHitsTS'], Vars['NpIn'], direction = 'yx')\n\n # get RT1 (from stim to nose poke exit)\n if 'NpOut' in Vars.keys():\n RT1, RT1x, RT1y = SparseDistance(CatchParams['StimHitsTS'], Vars['NpOut'], direction = 'xy')\n\n # then RT2 (from nose poke exito to response port in)\n RT2, RT2x, RT2y = SparseDistance( Vars['NpOut'][RT1y], RpIn, direction = 'xy')\n\n # then RT3 (from resp port in to first lick)\n RT3, RT3x, RT3y = SparseDistance(RpIn[RT2y], Lick, direction = 'xy')\n\n # RT4 is the time from the first lick to the third lick\n RT4 = CatchParams['ThirdLickHitTS'] - CatchParams['FirstLickHitTS']\n\n else:\n RT0 = np.array([])\n RT1 = np.array([])\n RT2 = np.array([])\n RT3 = np.array([])\n RT4 = np.array([])\n RTT = np.array([])\n\n Data[CurStim]['RT0'] = RT0\n Data[CurStim]['RT1'] = RT1\n Data[CurStim]['RT2'] = RT2\n Data[CurStim]['RT3'] = RT3\n Data[CurStim]['RT4'] = RT4\n Data[CurStim]['RTT'] = RTT\n\n\n if not Data[CurStim]:\n Data.pop(CurStim)\n\n return Data\n\n########################################################################################################################\n\ndef GetHits(StimTS, LickTS, RespWin = 3.0, WetLick = 3):\n '''If len(StimTS)=m and len(LickTS)=n\n create a two matrices of m x n and (n x m)'''\n #pdb.set_trace()\n LickTS = np.array(LickTS, ndmin = 2)\n StimTS = np.array(StimTS, ndmin = 2)\n xx = np.tile(StimTS, (LickTS.size,1) 
def GetHits(StimTS, LickTS, RespWin = 3.0, WetLick = 3):\n    '''If len(StimTS)=m and len(LickTS)=n,\n    create two matrices of (m x n) and (n x m)'''\n    #pdb.set_trace()\n    LickTS = np.array(LickTS, ndmin = 2)\n    StimTS = np.array(StimTS, ndmin = 2)\n    xx = np.tile(StimTS, (LickTS.size,1) )\n    yy = np.tile(LickTS, (StimTS.size,1) ).transpose()\n    Dif = np.round(yy - xx, 3)\n    Dif[Dif<0.00] = 1e6\n\n    #Find the indices\n    LickIndx = Dif.argmin(0)[Dif.min(0)<1e6]\n\n    Res = {}\n    #Get the time of the third lick\n    ValidStims = np.flatnonzero( LickIndx+WetLick-1 <= LickTS.size-1 )\n    #DryLicks = Dif[LickIndx[ValidStims], ValidStims]\n    WetLicksIndx = LickIndx[ValidStims] + WetLick -1\n    WetLicks = Dif[WetLicksIndx, ValidStims]\n\n    Res['StimHitsIndx'] = np.flatnonzero( WetLicks <= RespWin )\n    Res['StimMissIndx'] = np.delete( np.arange(StimTS.size), Res['StimHitsIndx'] )\n    Res['StimHitsTS'] = StimTS[0, Res['StimHitsIndx'] ]\n    Res['StimMissTS'] = StimTS[0, Res['StimMissIndx'] ]\n    Res['FirstLickHitIndx'] = LickIndx[ValidStims][ Res['StimHitsIndx'] ]\n    Res['ThirdLickHitIndx'] = WetLicksIndx[ Res['StimHitsIndx'] ]\n    Res['FirstLickHitTS'] = LickTS[0, Res['FirstLickHitIndx'] ]\n    Res['ThirdLickHitTS'] = LickTS[0, Res['ThirdLickHitIndx'] ]\n\n    return Res\n\n########################################################################################################################\n\n
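# Minimal usage sketch of GetHits on synthetic timestamps (added for\n# illustration; the numbers are invented). The stimulus at t=10 s collects\n# its third lick 0.8 s later (a hit), while the third lick after the t=20 s\n# stimulus arrives 4.5 s later, outside the default 3 s response window.\ndef _DemoGetHits():\n    StimTS = np.array([10.0, 20.0])\n    LickTS = np.array([10.4, 10.6, 10.8, 20.5, 21.0, 24.5])\n    Res = GetHits(StimTS, LickTS)\n    print 'Hit  stimuli:', Res['StimHitsTS']    # expected: [ 10.]\n    print 'Miss stimuli:', Res['StimMissTS']    # expected: [ 20.]\n\n########################################################################################################################\n\n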
def DistXY(x, y):\n    ''' Obtain the minimum distances between the events in two vectors\n    of timestamps of different length. Note: Make sure that y happens after x'''\n    x = np.array(x, ndmin=1)\n    y = np.array(y, ndmin=1)\n    xx = np.tile(x, (np.size(y), 1))\n    yy = np.tile(y, (np.size(x), 1)).transpose()\n    Dif = np.round_(yy - xx, 3)\n    Dif[Dif<0.00] = 1e6\n    iDif = Dif.argmin(0)\n    Dif = Dif.min(0)\n    e = Dif != 1e6\n    Dif = Dif[e]\n    iDif = iDif[e]\n\n    return Dif, iDif\n\n########################################################################################################################\n\ndef DistYX(x, y):\n    ''' Obtain the distances between the events in two vectors\n    of timestamps of different length. Note: Make sure that x happens after y'''\n    x = np.array(x, ndmin=1)\n    y = np.array(y, ndmin=1)\n    xx = np.tile(x,(np.size(y),1))\n    yy = np.tile(y,(np.size(x),1)).transpose()\n    Dif = np.round(xx - yy,3)\n    Dif[Dif<0.00] = 1e6\n    iDif = Dif.argmin(0)\n    Dif = Dif.min(0)\n    e = Dif != 1e6\n    Dif = Dif[e]\n    iDif = iDif[e]\n\n    return Dif, iDif\n\n########################################################################################################################\n\ndef SparseDistance(x, y, direction = 'xy', maxTime = 1e6):\n\n    '''Sparse calculation of minimum distance between two vectors\n    of different length.\n\n    Inputs:\n        x,y:\n            vectors of timestamps of different length\n        direction:\n            \"xy\" if y happens after x, \"yx\" if x happens after y\n        maxTime:\n            maximum time lag between the two events\n\n    Output:\n        Dif:\n            distances between the vectors.\n        xIndxDif:\n            indices of the first vector\n        yIndxDif:\n            indices of the second vector that give those differences'''\n\n    x = np.array(x, ndmin = 2)\n    y = np.array(y, ndmin = 2)\n\n    if x.size ==0 or y.size ==0:\n        return np.array([]), np.array([]), np.array([])\n\n    xx = np.tile(x, (y.size, 1))\n    yy = np.tile(y, (x.size, 1)).transpose()\n\n    if direction == 'xy':\n        Dif = np.round(yy - xx, 3)\n    elif direction == 'yx':\n        Dif = np.round(xx - yy, 3)\n\n    Dif[Dif < 0.00] = maxTime\n\n    if x.size > y.size:\n        xIndx = Dif.argmin(1)\n        yIndx = Dif.argmin(0)[xIndx]\n    else:\n        yIndx = Dif.argmin(0)\n        xIndx = Dif.argmin(1)[yIndx]\n\n    Dif = Dif[yIndx, xIndx]\n    indx = np.flatnonzero(Dif < maxTime)\n    xIndx = xIndx[indx]\n    yIndx = yIndx[indx]\n    Dif = Dif[indx]\n\n    return (Dif, xIndx, yIndx)\n\n########################################################################################################################\n\ndef SparseDistance2(x, y):\n    # NOTE: unfinished sort-based alternative to SparseDistance; it is not\n    # called anywhere in this module.\n    xy = np.concatenate([x,y],1)\n    mainIndx = np.argsort(xy)\n    xySort = np.sort(xy)\n    if x.size < y.size:\n        Indx = np.flatnonzero(mainIndx < x.size)\n    else:\n        Indx = np.flatnonzero(mainIndx >= x.size)\n\n    if (Indx[-1]+1) < xySort.size:\n        dist = xySort[Indx+1]-xySort[Indx]\n    else:\n        pass\n        # find the biggest Indx followed by\n\n########################################################################################################################\n\ndef LoadBehFile(filename = None, InitialDir=''):\n\n    if not filename:\n        if InitialDir and os.path.isdir(InitialDir):\n            p = InitialDir\n        else:\n            p = ''\n\n        filename = QtGui.QFileDialog.getOpenFileNameAndFilter(caption='Select a *.beh file',\n                                                              filter='*.beh',\n                                                              directory = p)\n        filename = str(filename[0])\n        if len(filename) == 0: return\n\n    else:\n        filename = os.path.join(InitialDir, filename)\n\n    if not os.path.isfile(filename): return\n\n    Data = loadmat(filename)\n    Data = Data['Data']\n    Data['Subject'] = str(Data['Subject'])\n    Data['File'] = str(Data['File'])\n    Data['MSN'] = str(Data['MSN'])\n    Data['Box'] = int(Data['Box'])\n    Data['Experiment'] = int(Data['Experiment'])\n\n    Stims = FindStims(Data)\n    if not Stims:\n        Data = GetBhvParams(Data)\n    else:\n        for k in Stims:\n            for n in Data[k].keys():\n                if Data[k].has_key('Descr'):\n                    Data[k]['Descr'] = str(Data[k]['Descr'])\n                Data[k][n] = np.array(Data[k][n])\n\n    return Data\n\n########################################################################################################################\n\ndef FindStims(Data):\n    Stims=[k for k in Data.keys() if k.find('Stim')!=-1]\n    Stims.sort()\n    return Stims\n\n########################################################################################################################\n\n
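# Hedged usage sketch for SparseDistance (added for illustration; synthetic\n# numbers). With direction='xy' every event in x is paired with the nearest\n# later event in y, and events with no later partner are dropped: here the\n# stimulus at 9 s has no following response, so only two pairs survive.\ndef _DemoSparseDistance():\n    stim = np.array([1.0, 5.0, 9.0])\n    resp = np.array([1.2, 5.7])\n    Dif, xIndx, yIndx = SparseDistance(stim, resp, direction = 'xy')\n    print 'latencies :', Dif      # expected: [ 0.2  0.7]\n    print 'stim index:', xIndx    # expected: [0 1]\n    print 'resp index:', yIndx    # expected: [0 1]\n\n########################################################################################################################\n\n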
def GetRatNames(prefix = 'HMV', pth = ''):\n\n    if not pth:\n        pth = str(QtGui.QFileDialog.getExistingDirectory(caption = 'Beh Files Directory'))\n        if not pth: return\n\n    files = glob(os.path.join(pth, '*'))\n    names = []\n    for k in files:\n        r=re.search('%s[0-9]{1,2}(?=_)' % prefix, os.path.split(k)[1])\n        if r:\n            names.append(r.group())\n\n    ratnames = np.unique(names)\n    ratnums = []\n    for k in ratnames:\n        ratnums.append(int(re.search('(?<=%s)[0-9]{1,2}' % prefix, k).group()))\n    ratnums = np.array(ratnums)\n    return list(ratnames[ratnums.argsort()])\n\n########################################################################################################################\n\ndef SplitMPCFiles(filename, outpth):\n\n    import shutil\n\n    fid=open(filename,'rU')\n    w=fid.readlines()\n    fid.close()\n\n    filehdr=w[0]\n    lines='\\n\\n\\n'\n    st=[]\n\n    for x,y in enumerate(w):\n        if y.find('Start Date')!=-1:\n            st.append(x)\n\n    st.append(len(w))\n    pth,outfile=os.path.split(filename)\n\n    if os.path.isdir(os.path.join(outpth,outfile.replace(' ','_'))):\n        shutil.rmtree(os.path.join(outpth,outfile.replace(' ','_')))\n\n    os.mkdir(os.path.join(outpth,outfile.replace(' ','_')))\n\n    for x,y in enumerate(st):\n        if x+2>len(st):\n            break\n        fid=open(os.path.join(outpth,outfile.replace(' ','_'),outfile+string.lowercase[x]),'w')\n        temp=w[y:st[x+1]]\n        temp.insert(0,lines)\n        temp.insert(0,filehdr)\n        fid.writelines(temp)\n        fid.close()\n\n########################################################################################################################\n\ndef GetFilenames(RatName, RegExp = '1T_REW[0-9].beh|NP0[0-9]A?.beh', BhvDir = ''):\n\n    if not os.path.isdir(BhvDir):\n        print 'That directory does not exist !'\n        return\n\n    filesList = glob(os.path.join(BhvDir,RatName+'_*.beh'))\n    if not filesList: return\n    filesList.sort()\n    files=[]; MSN=[]\n\n    # iterate over the list of files\n    for f in filesList:\n\n        # if the regular expression is found\n        if re.search(RegExp, f, re.IGNORECASE):\n            files.append(f)\n\n            # Try to find the MSN in the file name\n            match = re.search('(?<=_[0-9]{4}_)[HMV]?.*(?=\\.beh)', f)\n            if match:\n                MSN.append(match.group())\n                # try to eliminate the 'HMV_' at the beginning of the MSN name\n                if re.search('HMV_', MSN[-1]):\n                    MSN[-1] = re.search('(?<=HMV_).*', MSN[-1]).group()\n\n    return (files, MSN)\n\n########################################################################################################################\n\ndef rmBehFiles(pth = '', pattern = '*.beh'):\n    # Deletes only the files that match a certain pattern\n\n    if not pth:\n        pth = str(QtGui.QFileDialog.getExistingDirectory(caption = 'Dir to Delete'))\n        if not pth: return\n\n    pattern=os.path.join(pth, pattern)\n    files = glob(pattern)\n\n    if files:\n        for k in files:\n            if os.path.isfile(k):\n                os.remove(k)\n    else:\n        print 'nothing to delete !'\n\n########################################################################################################################\n\ndef PrintDataSummary(Data):\n\n    print '================================================================================'\n    for k in ['Subject','StartDate']:\n        print '%s:\\t%s' % (k, str(Data[k]))\n    for k in ['Box','MSN']:\n        print '%s:\\t\\t%s' % (k, str(Data[k]))\n\n    Stims=[k for k in Data.keys() if k.find('Stim')!=-1]\n    if not Stims:\n        return\n    else:\n        Stims.sort()\n\n    keys=['HitsTS','MissTS','ErrTS']\n\n    for k in Stims:\n        print 'Stim: %s\\t' % Data[k]['Descr'],\n        for j in keys:\n            if Data[k].has_key(j):\n                if type(Data[k][j]) in [np.ndarray, list]:\n                    print '%s:\\t%s\\t' % ( j[0:-2], str(Data[k][j].size) ),\n                else:\n                    
print '%s:\\t%s\\t' % ( j[0:-2], '' ),\n if Data[k].has_key('RTT') and Data[k]['RTT'].size>0:\n print 'RTT:\\t%0.2f\\t' % np.mean(Data[k]['RTT'])\n else:\n print\n print '================================================================================'\n\n########################################################################################################################\n\ndef loadmat(filename):\n '''\n this function should be called instead of direct spio.loadmat\n as it cures the problem of not properly recovering python dictionaries\n from mat files. It calls the function check keys to cure all entries\n which are still mat-objects\n '''\n data = spio.loadmat(filename, struct_as_record=False, squeeze_me=True)\n return _check_keys(data)\n\ndef _check_keys(dict):\n '''\n checks if entries in dictionary are mat-objects. If yes\n todict is called to change them to nested dictionaries\n '''\n for key in dict:\n if isinstance(dict[key], spio.matlab.mio5_params.mat_struct):\n dict[key] = _todict(dict[key])\n return dict\n\ndef _todict(matobj):\n '''\n A recursive function which constructs from matobjects nested dictionaries\n '''\n dict = {}\n for strg in matobj._fieldnames:\n elem = matobj.__dict__[strg]\n if isinstance(elem, spio.matlab.mio5_params.mat_struct):\n dict[strg] = _todict(elem)\n else:\n dict[strg] = elem\n return dict\n\n########################################################################################################################\n" }, { "alpha_fraction": 0.4646172523498535, "alphanum_fraction": 0.4859904646873474, "avg_line_length": 40.15284729003906, "blob_id": "3b36c7f594f54264cb07eb58a7528b3f3c6d93d2", "content_id": "a3e72d344bc64e4487a9212bbb18cd445bb430d6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 94043, "license_type": "no_license", "max_line_length": 151, "num_lines": 2231, "path": "/m_PlotBehavior.py", "repo_name": "hemanzur/Behavior_analysis", "src_encoding": "UTF-8", "text": "#PlotBehavior.py\r\n# Module containing al the routines to plot behavioral events\r\n\r\n########################################################################################################################\r\n## Module Initialization Routine\r\n\r\nimport os, sys, re\r\nfrom datetime import datetime as dtm\r\nfrom m_NeuralPlot import rasterPlot\r\n\r\nfrom scipy.stats import probplot, norm, gaussian_kde\r\nfrom scipy.optimize import curve_fit\r\n\r\nfrom matplotlib.axes import Axes\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib import rc\r\nimport numpy as np\r\n\r\nfrom PyQt4 import QtGui, QtCore\r\n\r\nimport guidata\r\nimport guidata.dataset.datatypes as dt\r\nimport guidata.dataset.dataitems as di\r\napp = guidata.qapplication()\r\n\r\nimport m_bhvfuncs as bhv\r\n\r\nfrom matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigCanvas\r\nfrom matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavToolbar\r\nfrom matplotlib.figure import Figure\r\n\r\nimport ipdb\r\n\r\n# MPL Widget Class to embed in Qt\r\nclass MplWidget(FigCanvas):\r\n def __init__(self, parent=None):\r\n self.fig = Figure()\r\n self.fig.set_facecolor('w')\r\n \r\n FigCanvas.__init__(self, self.fig)\r\n if parent: self.setParent(parent)\r\n\r\n FigCanvas.setSizePolicy(self,\r\n QtGui.QSizePolicy.Expanding,\r\n QtGui.QSizePolicy.Expanding)\r\n FigCanvas.updateGeometry(self)\r\n \r\n########################################################################################################################\r\n\r\ndef 
PlotLATER(Data=[], ax=[], OutlierCalc = 'pctile', PlotMiss=False,\r\n              ShowFit=True, ShowTitle=False, ShowEq=False):\r\n\r\n    if not Data: return\r\n\r\n    # create a function to pass as parameter for the line fit\r\n    def LineFuncFit(x, m, n): return m*x+n\r\n\r\n    # x values vector for the fit\r\n    xVal = np.linspace(-50,10)\r\n\r\n    rc('font', size=10)\r\n    rc('font', family='monospace')\r\n    rc('font', serif='Bitstream Vera Sans Mono')\r\n\r\n    # check if ax is an axes instance; if not, create one\r\n    if ax == [] or not isinstance(ax, Axes):\r\n        plt.figure(facecolor='w')\r\n        ax=plt.subplot(111)\r\n\r\n    # color list for different stimuli\r\n    col=['b','g','r','c','m']\r\n\r\n    # get the stimuli in the data structure\r\n    Stims = bhv.FindStims(Data)\r\n\r\n    # iterate over stimuli\r\n    for j,k in enumerate(Stims):\r\n\r\n        # get the RTs\r\n        RT1,_ = bhv.DistXY(Data[k]['StimTS'], Data[k]['NpOut'])\r\n\r\n        # replace zeros with 0.001\r\n        RT1[RT1==0] = 1e-3\r\n\r\n        # do a probability plot\r\n        a ,_ = probplot(-1/RT1, dist='norm')\r\n        p, = ax.plot(a[1], a[0], '+', ms=7, mew=2, color=col[j],\r\n                     alpha=0.5, label=str(Data[k]['Descr'])+' Hits')\r\n\r\n        # calculate a fit taking the central part of the distribution\r\n        if ShowFit and len(RT1) > 10:\r\n\r\n            # choose the method to calculate outliers\r\n            if OutlierCalc == 'pctile':\r\n                l, u = np.percentile(a[1], (15,95))\r\n            elif OutlierCalc == 'mc':\r\n                m,l,u = Outliers4Skew(a[1])\r\n\r\n            # get indices of values and plot a line fit\r\n            indx = np.flatnonzero((a[1] >l) & (a[1]<u ))\r\n            popt, _= curve_fit(LineFuncFit, a[1][indx], a[0][indx])\r\n            ax.plot(xVal, LineFuncFit(xVal, popt[0], popt[1]),\r\n                    lw=2, color=col[j], alpha=0.5, label='_nolegend_')\r\n\r\n            if ShowEq:\r\n                bp = dict(boxstyle=\"round\", alpha=0.7, fc='w', ec=[.5,.5,.5])\r\n                ax.text(-9,0,r'$y_{Hits}=%0.2fx + %0.2f$' % (popt[0], popt[1]), fontsize = 20, bbox = bp)\r\n\r\n    # set limits\r\n    ax.set_xlim(-8,0)\r\n    ax.set_ylim(-3.5,3.5)\r\n\r\n    # get the percentiles of a normal distribution for the y axes ticks\r\n    NDist = norm(0, 1)\r\n    Pctils = np.array([1,5,10,25,50,75,90,95,99])/100.0\r\n    ytcks = [NDist.ppf(k) for k in Pctils]\r\n    ax.set_yticks(ytcks)\r\n    ax.set_yticklabels([str(k) for k in Pctils])\r\n\r\n    # set x axes ticks and tick labels\r\n    xtcks = np.array([0.1, 0.2, 0.3,0.5, 1, 5])\r\n    ax.set_xticks(-1/xtcks)\r\n    ax.set_xticklabels([str(k) for k in xtcks])\r\n\r\n    # add legend and grid and axes labels\r\n    ax.legend(loc='upper left', fancybox=True, prop={'size':10})\r\n    ax.grid(alpha=0.6)\r\n    ax.set_ylabel('Probability')\r\n    ax.set_xlabel('RT1 (msec)')\r\n\r\n    # if desired show title\r\n    if ShowTitle:\r\n        ax.set_title(Data['Subject']+' '+'%02d/%02d/%02d' % tuple(Data['StartDate'])+'\\n'+Data['MSN'],\r\n                     fontsize=10)\r\n    ax.figure.tight_layout()\r\n\r\n########################################################################################################################\r\n\r\n
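# Hedged sketch of the LATER/reciprocal-normal idea behind PlotLATER (added\r\n# for illustration; the rate parameters are invented). If -1/RT is normally\r\n# distributed, the probability plot drawn above is a straight line, which is\r\n# what the central-part fits look for.\r\ndef _DemoReciprobit(nTrials = 500):\r\n    # draw decision rates r ~ N(4, 1), keep them away from zero, take RT = 1/r\r\n    rate = np.random.normal(4.0, 1.0, nTrials)\r\n    rate = rate[rate > 0.5]\r\n    RT = 1.0/rate\r\n    osm, fit = probplot(-1.0/RT, dist='norm')\r\n    # fit = (slope, intercept, r); r close to 1 supports the LATER account\r\n    print 'reciprobit linearity r =', fit[2]\r\n\r\n########################################################################################################################\r\n\r\n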
def PlotDensityFuncs(pth = ''):\r\n    '''Examine the CDFs and/or PDFs of each RT'''\r\n\r\n    if not pth:\r\n        pth = str(QtGui.QFileDialog.getExistingDirectory(caption = 'Beh Files Directory'))\r\n        if not pth: return\r\n\r\n    ratNames = bhv.GetRatNames(prefix = 'HMV', pth = pth)\r\n    if not ratNames: return\r\n\r\n    class SetParams(dt.DataSet):\r\n        RatName = di.ChoiceItem('Choose a Rat',tuple(ratNames))\r\n        Task = di.StringItem('Regular Expr','NP0[0-9]A?.beh')\r\n        PDF = di.BoolItem('Plot PDF ?',default=True).set_pos(col=0)\r\n        CDF = di.BoolItem('Plot CDF ?',default=True).set_pos(col=1)\r\n        Norm = di.BoolItem('Normalize ?',default=False).set_pos(col=0)\r\n\r\n    Params=SetParams()\r\n    if Params.edit()==1:\r\n\r\n        RatName = ratNames[Params.RatName]\r\n        Task = Params.Task\r\n        files,_ = bhv.GetFilenames(RatName, RegExp=Params.Task, BhvDir = pth)\r\n        if not 'files' in locals(): return\r\n\r\n        Stim0=[]; Stim1=[]\r\n        for k in files:\r\n            Data = bhv.LoadBehFile(k)\r\n            if not [x for x in Data.keys() if x.find('Stim')!=-1]:\r\n                Data = bhv.GetBhvParams(Data)\r\n            if Data.has_key('Stim0'):\r\n                Stim0.append(Data['Stim0'])\r\n            if Data.has_key('Stim1') and\\\r\n               Data['Stim1'].has_key('RTT') and\\\r\n               Data['Stim1'].has_key('RT1') and\\\r\n               Data['Stim1'].has_key('RT2') and\\\r\n               Data['Stim1'].has_key('RT3'):\r\n                Stim1.append(Data['Stim1'])\r\n\r\n        rc('font',size=9, family='monospace', serif='Bitstream Vera Sans Mono')\r\n\r\n        r=np.linspace(0,1,len(files))\r\n        b=np.linspace(1,0,len(files))\r\n\r\n        keys = ['RTT','RT1','RT2','RT3']\r\n        nS = len(files)\r\n\r\n        if Params.CDF:\r\n            plt.figure(dpi=100,facecolor='w')\r\n\r\n            for m,n in enumerate(keys):\r\n                m = m+1\r\n                plt.subplot(2, 2, m)\r\n\r\n                for j,k in enumerate(Stim0):\r\n                    if k[n].size>1:\r\n                        kernel = gaussian_kde(k[n])\r\n                        x = np.linspace(k[n].min(), k[n].max(), 100)\r\n                        y = np.cumsum(kernel(x))\r\n                        y = y/y[-1]\r\n                        #if Params.Norm==1: y=y/sum(y)\r\n                        plt.plot(x,y,color=[r[j],0,b[j]])\r\n\r\n                plt.title(n+' n='+str(nS)+' sessions')\r\n                plt.xlim(-0.2,3)\r\n\r\n                if m in [1,3]:\r\n                    if Params.Norm==1: plt.ylabel('Probability')\r\n                    else: plt.ylabel('Count')\r\n                if m in [3,4]:\r\n                    plt.xlabel('Time (sec)')\r\n\r\n                plt.grid()\r\n\r\n            plt.suptitle(RatName + ' ' + Params.Task)\r\n            plt.tight_layout()\r\n\r\n        if Params.PDF:\r\n            plt.figure(RatName, dpi=100, facecolor='w')\r\n            for m,n in enumerate(keys):\r\n                m=m+1\r\n                plt.subplot(2,2,m)\r\n                for j,k in enumerate(Stim0):\r\n                    if k[n].size>1:\r\n                        kernel = gaussian_kde(k[n])\r\n                        x = np.linspace(k[n].min(), k[n].max(), 100)\r\n                        y = kernel(x)\r\n\r\n                        #if Params.Norm==1: y=y/sum(y)\r\n                        plt.plot(x,y,color=[r[j],0,b[j]])\r\n                plt.title(n+' n='+str(nS)+' sessions')\r\n                plt.xlim(-0.2,3)\r\n                if m in [1,3]:\r\n                    if Params.Norm==1: plt.ylabel('Probability')\r\n                    else: plt.ylabel('Count')\r\n                if m in [3,4]:\r\n                    plt.xlabel('Time (sec)')\r\n                plt.grid()\r\n            plt.suptitle(RatName + ' ' + Params.Task)\r\n            plt.tight_layout()\r\n\r\n########################################################################################################################\r\n\r\ndef RxTimeEvolution(pth = ''):\r\n    \"\"\"Load the required files for a given rat and task and extract the stimuli\"\"\"\r\n\r\n    import scipy.stats as st\r\n\r\n    ratNames = bhv.GetRatNames(prefix = 'HMV', pth = pth)\r\n    class SetParams(dt.DataSet):\r\n        RatName = di.ChoiceItem('Choose a Rat', tuple(ratNames))\r\n        Task = di.StringItem('Regular Expr','(NP0[0-9]A?.beh)')\r\n        SaveFig = di.BoolItem(text='', label='SaveFig ?')\r\n\r\n    Params=SetParams()\r\n\r\n    if Params.edit()==1:\r\n        RatName = ratNames[Params.RatName]\r\n        Task = Params.Task\r\n        files, _ = bhv.GetFilenames(RatName, RegExp=Task, BhvDir = pth)\r\n        FigHandles=[]\r\n        Dates=[]; Stims={}\r\n        for k in range(10):\r\n            Stims['Stim'+str(k)]=[]\r\n\r\n        for k in files:\r\n            Data=bhv.LoadBehFile(k)\r\n            Dates.append(Data['StartDate'])\r\n            if not [x for x in Data.keys() if x.find('Stim')!=-1]:\r\n                Data=bhv.GetBhvParams(Data)\r\n            s=[k for k in Data.keys() if k.find('Stim')!=-1]\r\n            s.sort()\r\n\r\n            for x in s:\r\n                Stims[x].append(Data[x])\r\n\r\n        for k in Stims.keys():\r\n            if not any(Stims[k]):\r\n                Stims.pop(k)\r\n\r\n        for x in Stims:\r\n            Stim=Stims[x]\r\n            ## Calculate 
the average RxTimes across sessions and plot them in a bar graph\r\n\r\n mRTT=[]; mRT1=[]; mRT2=[]; mRT3=[];\r\n eRTT=np.zeros([len(Stim),2]); eRT1=np.zeros([len(Stim),2]);\r\n eRT2=np.zeros([len(Stim),2]); eRT3=np.zeros([len(Stim),2])\r\n\r\n for j,k in enumerate(Stim):\r\n\r\n if k.has_key('RTT'):\r\n k['RTT']=k['RTT'].flatten()\r\n mRTT=np.append(mRTT, st.nanmean(k['RTT']))\r\n if k['RTT'].size>1:\r\n eRTT[j,:]=bootci(k['RTT'],100)\r\n else:\r\n eRTT[j,:]=[0,0]\r\n else:\r\n mRTT = np.append(mRTT,0)\r\n\r\n if k.has_key('RT1'):\r\n k['RT1']=k['RT1'].flatten()\r\n mRT1 = np.append(mRT1, st.nanmean(k['RT1']))\r\n if k['RT1'].size>1:\r\n eRT1[j,:]=bootci(k['RT1'],100)\r\n else:\r\n eRT1[j,:]=[0,0]\r\n else:\r\n mRT1 = np.append(mRT1,0)\r\n\r\n if k.has_key('RT2'):\r\n k['RT2']=k['RT2'].flatten()\r\n mRT2 = np.append(mRT2, st.nanmean(k['RT2']))\r\n if k['RT2'].size>1:\r\n eRT2[j,:]=bootci(k['RT2'],100)\r\n else:\r\n eRT2[j,:]=[0,0]\r\n else:\r\n mRT2 = np.append(mRT2,0)\r\n\r\n if k.has_key('RT3'):\r\n k['RT3']=k['RT3'].flatten()\r\n mRT3 = np.append(mRT3, st.nanmean(k['RT3']))\r\n if k['RT3'].size>1:\r\n eRT3[j,:]=bootci(k['RT3'],100)\r\n else:\r\n eRT3[j,:]=[0,0]\r\n else:\r\n mRT3 = np.append(mRT3,0)\r\n\r\n mRTT=np.nan_to_num(mRTT)\r\n mRT1=np.nan_to_num(mRT1)\r\n mRT2=np.nan_to_num(mRT2)\r\n mRT3=np.nan_to_num(mRT3)\r\n eRT1=[mRT1-eRT1[:,0],eRT1[:,1]-mRT1]\r\n eRT2=[mRT2-eRT2[:,0],eRT2[:,1]-mRT2]\r\n eRT3=[mRT3-eRT3[:,0],eRT3[:,1]-mRT3]\r\n\r\n # Set some figure properties\r\n rc('font',size=9)\r\n rc('font',family='monospace')\r\n rc('font',serif='Bitstream Vera Sans Mono')\r\n\r\n f=plt.figure(dpi=100,facecolor='w')\r\n FigHandles.append(f)\r\n plt.subplot2grid((1,5),(0,0),colspan=4)\r\n\r\n # Stacked barplots of the data\r\n b1=plt.bar(range(1,len(Stim)+1),mRT1,bottom=0,\r\n yerr=eRT1,color=[.3,.5,.8],align='center',edgecolor='',\r\n error_kw={'elinewidth':2.5})\r\n\r\n b2=plt.bar(range(1,len(Stim)+1),mRT2,bottom=mRT1,\r\n yerr=eRT2,\r\n color=[.3,.8,.5],align='center',edgecolor='',\r\n error_kw={'elinewidth':2.5})\r\n\r\n x=mRT1+mRT2\r\n b3=plt.bar(range(1,len(Stim)+1),mRT3,bottom=x,\r\n yerr=eRT3,color=[.8,.5,.3],align='center',edgecolor='',\r\n error_kw={'elinewidth':2.5})\r\n\r\n plt.legend([b1,b2,b3],['RT1','RT2','RT3'],mode='none',ncol=1,fancybox=True,loc=0)\r\n plt.title(RatName+' '+Task)\r\n plt.ylim(0,1.1*mRTT.max())\r\n plt.xticks(range(1,len(Stim)+1),range(1,len(Stim)+1))\r\n plt.xlim(0,len(Stim)+1)\r\n plt.xlabel('Session Number')\r\n plt.ylabel('Time (sec)')\r\n plt.grid()\r\n\r\n # To plot the average Reaction times\r\n\r\n plt.subplot2grid((1,5),(0,4),colspan=4)\r\n\r\n mmRT1=st.nanmean(mRT1);\r\n eeRT1=bootci(mRT1,100)\r\n eeRT1=np.array([[mmRT1-eeRT1[0]],[eeRT1[1]-mmRT1]])\r\n b1=plt.bar(1, mmRT1, bottom=0, color=[.3,.5,.8], edgecolor='',\r\n align='center', yerr=eeRT1, error_kw={'elinewidth':2.5})\r\n\r\n mmRT2=st.nanmean(mRT2);\r\n eeRT2=bootci(mRT2,100)\r\n eeRT2=np.array([[mmRT2-eeRT2[0],eeRT2[1]-mmRT2]])\r\n b2=plt.bar(1, mmRT2, bottom=mmRT1, color=[.3,.8,.5], edgecolor='',\r\n align='center', yerr=eeRT2, error_kw={'elinewidth':2.5})\r\n\r\n b=mmRT1+mmRT2\r\n mmRT3=st.nanmean(mRT3);\r\n eeRT3=bootci(mRT3,100)\r\n eeRT3=np.array([[mmRT3-eeRT3[0],eeRT3[1]-mmRT3]])\r\n b3=plt.bar(1, mmRT3, bottom=b, color=[.8,.5,.3], edgecolor='',\r\n align='center', yerr=eeRT3, error_kw={'elinewidth':2.5})\r\n\r\n plt.title('Average')\r\n plt.legend([b1,b2,b3],['RT1','RT2','RT3'],fancybox=True)\r\n plt.xlim(0,2)\r\n plt.ylim(0,1.1*max(mRTT))\r\n plt.xticks([1],['Avg'])\r\n 
plt.grid()\r\n            plt.tight_layout()\r\n\r\n            if Params.SaveFig:\r\n                SaveFigure(FigHandles,FigName=RatName+'_'+Params.Task+'_RxTimesEvolution')\r\n\r\n\r\n########################################################################################################################\r\n\r\ndef Raster2(Stim, Event, TWin=[1,4], axes=[], yAxVar = 'Trial', color='k', mew=1, alpha=1):\r\n\r\n    if not axes:\r\n        axes=plt.subplot(111)\r\n    elif not isinstance(axes, Axes):\r\n        axes=plt.subplot(111)\r\n\r\n    Event2 = []\r\n    y = []\r\n    if yAxVar == 'Trial':\r\n        for j,k in enumerate(Stim):\r\n            tmp = Event[( Event > k - TWin[0] ) & ( Event < k + TWin[1] )] -k\r\n            Event2.extend(tmp)\r\n            y.extend(j*np.ones_like(tmp))\r\n    elif yAxVar == 'Time':\r\n        for k in Stim:\r\n            tmp = Event[( Event > k - TWin[0] ) & ( Event < k + TWin[1] )] -k\r\n            Event2.extend(tmp)\r\n            y.extend(k*np.ones_like(tmp))\r\n\r\n    axes.plot(Event2, y,'|', color=color, mew=mew, alpha=alpha, rasterized = True)\r\n    axes.set_xlim(-TWin[0], TWin[1])\r\n\r\n########################################################################################################################\r\n\r\ndef Raster3(Stim, EventTS, TWin=[1.0,4.0], ax=[], yAxVar = 'Trial', color='k', lw=1, alpha=1):\r\n    '''Makes use of the new eventplot function in matplotlib 1.3'''\r\n\r\n    if not ax:\r\n        ax=plt.subplot(111)\r\n    elif not isinstance(ax, Axes):\r\n        ax=plt.subplot(111)\r\n    #pdb.set_trace()\r\n    Event2 = []\r\n    y = []\r\n    if yAxVar == 'Trial':\r\n        for j,k in enumerate(Stim):\r\n            tmp = EventTS[( EventTS > k - TWin[0] ) & ( EventTS < k + TWin[1] )] -k\r\n            if tmp.size > 0:\r\n                Event2.append(tmp)\r\n            else:\r\n                Event2.append(np.array(100, ndmin = 1))\r\n                #Event2.append(None)\r\n            y.append(j)\r\n    elif yAxVar == 'Time':\r\n        for k in Stim:\r\n            tmp = EventTS[( EventTS > k - TWin[0] ) & ( EventTS < k + TWin[1] )] -k\r\n            Event2.append(tmp)\r\n            y.append(k)\r\n\r\n    rasterPlot(Event2, ax=ax, color = color, lw = lw, alpha = alpha)\r\n    #ax.eventplot(Event2, lineoffsets = y, colors=[color], alpha = alpha, lw = lw)\r\n    ax.set_xlim(-TWin[0], TWin[1])\r\n\r\n########################################################################################################################\r\n\r\nclass Settings(dt.DataSet):\r\n    '''Select the parameters to save the currently active figure'''\r\n    BhvDir = di.DirectoryItem(label='Beh Dir')\r\n    ImgDir = di.DirectoryItem(label='Img Dir')\r\n    Format = di.ChoiceItem(label='Format', choices=[('.jpg','.jpg'),('.png','.png'),('.svg','.svg'),('.pdf','.pdf')])\r\n    dpi = di.IntItem(label='dpi', default=300, min=50, max=600, nonzero=True, slider=True)\r\n\r\nsettings = Settings()\r\n\r\nclass BhvRasters(QtGui.QMainWindow):\r\n\r\n    def __init__(self, workingDir = None):\r\n        if not workingDir:\r\n            self.BhvDir = str(QtGui.QFileDialog.getExistingDirectory())\r\n            if not self.BhvDir: return\r\n        else:\r\n            self.BhvDir = workingDir\r\n\r\n        QtGui.QMainWindow.__init__(self)\r\n        self.setWindowTitle('Raw Behavioral Data Browser')\r\n        self.main_widget = QtGui.QWidget(self)\r\n        self.main_layout = QtGui.QHBoxLayout(self.main_widget)\r\n        self.main_layout.setMargin(0)\r\n        self.main_layout.setSpacing(0)\r\n        self.oldMSN = ''\r\n        self.StimColors = ['b','g','r','c','m','y']\r\n\r\n        ########### STATUS BAR ##############################################\r\n\r\n##        self.StBar=self.statusBar()\r\n##        self.StBar.showMessage('')\r\n\r\n        ###################################################################################################\r\n\r\n        self.left_panel = QtGui.QWidget(self)\r\n        
self.left_panel.setMaximumWidth(400)\r\n self.left_panel.setMinimumWidth(400)\r\n left_panel_lay = QtGui.QVBoxLayout(self.left_panel)\r\n left_panel_lay.setMargin(2)\r\n \r\n grp1 = QtGui.QGroupBox('Get Files', self.left_panel)\r\n vlay = QtGui.QVBoxLayout()\r\n vlay.setSpacing(2)\r\n vlay.setMargin(2)\r\n hlay = QtGui.QHBoxLayout()\r\n self.RegExpEdit = QtGui.QLineEdit(self.main_widget)\r\n self.RegExpEdit.setText('NP04A_(6K|CentWht)R_12KL')\r\n hlay.addWidget(QtGui.QLabel('RegExp',self))\r\n hlay.addWidget(self.RegExpEdit)\r\n vlay.addLayout(hlay)\r\n \r\n hlay = QtGui.QHBoxLayout()\r\n self.selRatName= QtGui.QComboBox(self.main_widget)\r\n self.selRatName.addItems(bhv.GetRatNames(pth = self.BhvDir))\r\n self.selRatName.currentIndexChanged.connect(self.GetFiles)\r\n hlay.addWidget(self.selRatName)\r\n \r\n self.GetFilesBtn = QtGui.QPushButton('GetFiles',self.main_widget)\r\n self.GetFilesBtn.clicked.connect(self.GetFiles)\r\n hlay.addWidget(self.GetFilesBtn)\r\n vlay.addLayout(hlay)\r\n\r\n self.SettingsBtn = QtGui.QPushButton('Settings',self.main_widget)\r\n self.SettingsBtn.clicked.connect(self.editSettings)\r\n vlay.addWidget(self.SettingsBtn)\r\n \r\n grp1.setLayout(vlay)\r\n left_panel_lay.addWidget(grp1)\r\n## left_panel_lay.addStretch(1)\r\n\r\n ###################################################################################################\r\n ## Parameters Group BOx\r\n \r\n grp2 = QtGui.QGroupBox('Parameters', self.left_panel)\r\n \r\n vlay = QtGui.QVBoxLayout()\r\n vlay.setSpacing(2)\r\n vlay.setMargin(2)\r\n \r\n hlay = QtGui.QHBoxLayout()\r\n self.AxNRows = QtGui.QSpinBox()\r\n self.AxNRows.setMinimum(1)\r\n self.AxNRows.setMaximum(3)\r\n self.AxNRows.setValue(2)\r\n hlay.addWidget(QtGui.QLabel('AxNRows'))\r\n hlay.addWidget(self.AxNRows)\r\n hlay.addStretch(1)\r\n self.AxNCols = QtGui.QSpinBox()\r\n self.AxNCols.setMinimum(1)\r\n self.AxNCols.setMaximum(5)\r\n self.AxNCols.setValue(3)\r\n hlay.addWidget(QtGui.QLabel('AxNCols'))\r\n hlay.addWidget(self.AxNCols)\r\n self.SetNAxesBtn = QtGui.QPushButton('Set Axes')\r\n self.SetNAxesBtn.clicked.connect(self.SetNAxesProc)\r\n hlay.addStretch(1)\r\n hlay.addWidget(self.SetNAxesBtn)\r\n vlay.addLayout(hlay)\r\n\r\n self.naxes = self.AxNRows.value()*self.AxNCols.value()\r\n \r\n hlay = QtGui.QHBoxLayout()\r\n self.YAxesVarCombo = QtGui.QComboBox()\r\n self.YAxesVarCombo.addItems(['Trial','Time'])\r\n hlay.addWidget(QtGui.QLabel('Y Axes Var'))\r\n hlay.addWidget(self.YAxesVarCombo)\r\n hlay.addStretch(1)\r\n \r\n self.TWin1 = QtGui.QDoubleSpinBox(self.main_widget)\r\n self.TWin1.setRange(0.0, 4.0)\r\n self.TWin1.setSingleStep(0.1)\r\n self.TWin1.setValue(1.0)\r\n self.TWin1.setFixedWidth(60)\r\n hlay.addWidget(QtGui.QLabel('TWIN1'))\r\n hlay.addWidget(self.TWin1)\r\n hlay.addStretch(1)\r\n \r\n self.TWin2 = QtGui.QDoubleSpinBox(self.main_widget)\r\n self.TWin2.setRange(0.1, 10.0)\r\n self.TWin2.setSingleStep(0.1)\r\n self.TWin2.setValue(4.0)\r\n self.TWin2.setFixedWidth(60)\r\n hlay.addWidget(QtGui.QLabel('TWIN2'))\r\n hlay.addWidget(self.TWin2)\r\n vlay.addLayout(hlay)\r\n\r\n hlay = QtGui.QHBoxLayout()\r\n self.KernelType = QtGui.QComboBox()\r\n self.KernelType.addItems(['Hamming', 'Square'])\r\n hlay.addWidget(QtGui.QLabel('Kernel Type'))\r\n hlay.addWidget(self.KernelType)\r\n self.KernelSize = QtGui.QSpinBox()\r\n self.KernelSize.setRange(60, 1000)\r\n self.KernelSize.setValue(240)\r\n hlay.addStretch(1)\r\n hlay.addWidget(QtGui.QLabel('Kern Size'))\r\n hlay.addWidget(self.KernelSize)\r\n vlay.addLayout(hlay)\r\n\r\n hlay = 
QtGui.QHBoxLayout()\r\n self.RTPlotTypeCombo = QtGui.QComboBox()\r\n self.RTPlotTypeCombo.addItems(['Hist StepF','Hist Step','Hist Cum','PDFs','CDFs'])\r\n self.RTPlotTypeCombo.currentIndexChanged.connect(self.HitsTypeChanged_Proc)\r\n hlay.addWidget(QtGui.QLabel('RTs Plot Type'))\r\n hlay.addWidget(self.RTPlotTypeCombo)\r\n hlay.addStretch(1)\r\n self.HistTWin = QtGui.QDoubleSpinBox()\r\n self.HistTWin.setRange(0.1, 10.0)\r\n self.HistTWin.setValue(2.0)\r\n self.HistTWin.setSingleStep(0.1)\r\n hlay.addWidget(QtGui.QLabel('HistTWin'))\r\n hlay.addWidget(self.HistTWin)\r\n hlay.addStretch(1)\r\n self.HistNBins = QtGui.QSpinBox()\r\n self.HistNBins.setRange(0, 100)\r\n self.HistNBins.setValue(20)\r\n hlay.addWidget(QtGui.QLabel('NBins'))\r\n hlay.addWidget(self.HistNBins)\r\n vlay.addLayout(hlay)\r\n \r\n self.PlotTypeTable = QtGui.QTableWidget(self.naxes,5,self)\r\n## self.PlotTypeTable.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)\r\n## self.PlotTypeTable.updateGeometry()\r\n## self.PlotTypeTable.setMinimumHeight(350)\r\n self.PlotTypeTable.setVerticalHeaderLabels(['Ax%d' % k for k in range(1,self.naxes+1)])\r\n self.PlotTypeTable.setColumnWidth(0,65)\r\n self.PlotTypeTable.setColumnWidth(1,60)\r\n self.PlotTypeTable.setColumnWidth(2,60)\r\n self.PlotTypeTable.setColumnWidth(3,50)\r\n self.PlotTypeTable.setColumnWidth(4,90)\r\n self.PlotTypeTable.setHorizontalHeaderLabels(['PlotType','Stim','Ref','OutCm','Sort By'])\r\n \r\n self.PlotTypeCombo = []\r\n self.Stim2PlotCombo = []\r\n self.ReferenceCombo = []\r\n self.SortByCombo = []\r\n self.OutcomeCombo = []\r\n \r\n \r\n for k in range(self.naxes):\r\n self.PlotTypeTable.setRowHeight(k,20)\r\n self.PlotTypeCombo.append(QtGui.QComboBox(self))\r\n self.PlotTypeCombo[-1].addItems(['Later', 'RT Dist', 'Perf Evol', 'Behavior'])\r\n self.PlotTypeCombo[-1].currentIndexChanged.connect(self.PlotTypeStatus)\r\n self.PlotTypeTable.setCellWidget(k,0,self.PlotTypeCombo[-1])\r\n\r\n self.Stim2PlotCombo.append(QtGui.QComboBox(self))\r\n for n in ['Stim0', 'Stim1', 'Stim2']:\r\n self.Stim2PlotCombo[-1].addItem(n, QtCore.QVariant(n))\r\n self.Stim2PlotCombo[-1].currentIndexChanged.connect(self.Stim2PlotChanged_Proc)\r\n self.PlotTypeTable.setCellWidget(k,1,self.Stim2PlotCombo[-1])\r\n\r\n self.ReferenceCombo.append(QtGui.QComboBox(self))\r\n self.ReferenceCombo[-1].addItems(['CentNP','Stim','NpExit','RpIn','1stLick'])\r\n self.ReferenceCombo[-1].setCurrentIndex(1)\r\n self.PlotTypeTable.setCellWidget(k,2,self.ReferenceCombo[-1])\r\n \r\n self.OutcomeCombo.append(QtGui.QComboBox(self))\r\n self.OutcomeCombo[-1].addItems(['All','Hits','Error','Miss'])\r\n self.PlotTypeTable.setCellWidget(k,3,self.OutcomeCombo[-1])\r\n \r\n self.SortByCombo.append(QtGui.QComboBox(self))\r\n self.SortByCombo[-1].addItems(['No sort','CentNP','Stim','NpExit','RpIn','1stLick','3rdLick',\r\n 'RT0/CentNP','RT0/Stim','RT0/NpExit','RT0/RpIn','RT0/1stLick','RT0/3rdLick'])\r\n self.PlotTypeTable.setCellWidget(k,4,self.SortByCombo[-1])\r\n\r\n self.PlotTypeCombo[0].setCurrentIndex(1)\r\n self.PlotTypeCombo[0].setCurrentIndex(0)\r\n self.PlotTypeCombo[3].setCurrentIndex(1)\r\n for k in [1,2,4,5]:\r\n self.PlotTypeCombo[k].setCurrentIndex(3)\r\n \r\n self.PlotTypeTable.setFont(QtGui.QFont(self.PlotTypeTable.font().family(),8))\r\n vlay.addWidget(self.PlotTypeTable)\r\n\r\n grp2.setLayout(vlay)\r\n left_panel_lay.addWidget(grp2)\r\n grp2.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)\r\n grp2.updateGeometry()\r\n\r\n## 
left_panel_lay.addStretch(1)\r\n ###################################################################################################\r\n ## Rat Meta info Panel\r\n\r\n grp4 = QtGui.QGroupBox('MetaInfo', self.left_panel)\r\n vlay = QtGui.QVBoxLayout()\r\n vlay.setSpacing(2)\r\n vlay.setMargin(2)\r\n \r\n self.MetaInfoTable = QtGui.QTableWidget(6,1,self)\r\n self.MetaInfoTable.setVerticalHeaderLabels(['RatName', 'File', 'Date', 'MSN', 'Box', 'Group'])\r\n self.MetaInfoTable.setHorizontalHeaderLabels([''])\r\n self.MetaInfoTable.horizontalHeader().setVisible(False)\r\n self.MetaInfoTable.setFont(QtGui.QFont(self.MetaInfoTable.font().family(),8))\r\n self.MetaInfoTable.setColumnWidth(0,350)\r\n for k in range(6): self.MetaInfoTable.setRowHeight(k,20)\r\n vlay.addWidget(self.MetaInfoTable)\r\n \r\n grp4.setLayout(vlay)\r\n left_panel_lay.addWidget(grp4)\r\n \r\n ###################################################################################################\r\n\r\n grp3 = QtGui.QGroupBox('Plot Control', self.left_panel)\r\n vlay = QtGui.QVBoxLayout()\r\n vlay.setSpacing(2)\r\n vlay.setMargin(2)\r\n \r\n self.PlotBtn = QtGui.QPushButton('Plot Figure',self.main_widget)\r\n self.PlotBtn.clicked.connect(self.Plot)\r\n vlay.addWidget(self.PlotBtn)\r\n \r\n hlay = QtGui.QHBoxLayout()\r\n self.PrevBtn = QtGui.QPushButton('<< Prev',self.main_widget)\r\n self.PrevBtn.clicked.connect(self.PrevPlot)\r\n hlay.addWidget(self.PrevBtn)\r\n self.NextBtn = QtGui.QPushButton('Next >>',self.main_widget)\r\n self.NextBtn.clicked.connect(self.NextPlot)\r\n hlay.addWidget(self.NextBtn)\r\n vlay.addLayout(hlay)\r\n\r\n hlay = QtGui.QHBoxLayout()\r\n self.SaveFigCheck = QtGui.QCheckBox('Save each fig ?', self.main_widget)\r\n hlay.addWidget(self.SaveFigCheck)\r\n self.SaveFigBtn = QtGui.QPushButton('Save Fig',self.main_widget)\r\n self.SaveFigBtn.clicked.connect(self.SaveFig)\r\n hlay.addWidget(self.SaveFigBtn)\r\n vlay.addLayout(hlay)\r\n\r\n self.PanelBtn = QtGui.QPushButton('Panel')\r\n self.PanelBtn.clicked.connect(self.showPanel_Proc)\r\n vlay.addWidget(self.PanelBtn)\r\n grp3.setLayout(vlay)\r\n left_panel_lay.addWidget(grp3)\r\n left_panel_lay.addStretch(1)\r\n self.main_layout.addWidget(self.left_panel)\r\n\r\n \r\n \r\n ###################################################################################################\r\n\r\n self.right_panel = QtGui.QWidget(self)\r\n \r\n self.main_fig = MplWidget(self.right_panel)\r\n self.fig = self.main_fig.figure\r\n self.fig.canvas.mpl_connect('key_press_event', self.on_key)\r\n self.ntb = NavToolbar(self.main_fig, self.main_widget)\r\n self.ntb.setIconSize(QtCore.QSize(12,12))\r\n self.ntb.setFloatable(True)\r\n self.ntb.setMovable(True)\r\n\r\n vlay = QtGui.QVBoxLayout(self.right_panel)\r\n vlay.setMargin(0)\r\n vlay.setSpacing(0)\r\n self.selFiles = QtGui.QComboBox(self.main_widget)\r\n## QObj.connect(self.selFiles, QtCore.SIGNAL('currentIndexChanged(int)'), self.Plot)\r\n vlay.addWidget(self.selFiles)\r\n vlay.addWidget(self.main_fig)\r\n vlay.addWidget(self.ntb)\r\n\r\n self.right_panel.setLayout(vlay)\r\n self.main_layout.addWidget(self.right_panel)\r\n## self.main_fig.figure.canvas.mpl_connect('draw_event', self.draw_callback)\r\n\r\n self.ax = []\r\n for k in range(1,self.naxes+1):\r\n self.ax.append(self.main_fig.figure.add_subplot(self.AxNRows.value(),\r\n self.AxNCols.value(), k))\r\n\r\n if sys.platform == 'linux2':\r\n QtGui.QApplication.setStyle(QtGui.QStyleFactory.create('Plastique'))\r\n \r\n self.main_widget.setLayout(self.main_layout)\r\n 
self.setCentralWidget(self.main_widget)\r\n self.show()\r\n\r\n self.PanelWidget = QtGui.QTableWidget(10,2)\r\n for k in range(self.PanelWidget.rowCount()):\r\n self.PanelWidget.setRowHeight(k,20)\r\n self.PanelWidget.setCellWidget(k,0,QtGui.QLabel('Hola'))\r\n\r\n def showPanel_Proc(self):\r\n self.PanelWidget.show()\r\n\r\n def editSettings(self):\r\n settings.edit()\r\n \r\n def HitsTypeChanged_Proc(self):\r\n if self.RTPlotTypeCombo.currentText() not in ['Hist StepF','Hist Step', 'Hist Cum']:\r\n self.HistNBins.setDisabled(True)\r\n else:\r\n self.HistNBins.setEnabled(True)\r\n \r\n def PlotTypeStatus(self, indx1, indx2=None):\r\n\r\n if indx2 is not None:\r\n indx = indx2\r\n else:\r\n sender = self.sender()\r\n indx = self.PlotTypeCombo.index(sender)\r\n\r\n if self.PlotTypeCombo[indx].currentText() == 'Behavior':\r\n if hasattr(self,'Data'):\r\n if re.search('RT', str(self.ReferenceCombo[indx].currentText()))\\\r\n or self.oldMSN != self.Data['MSN'] or str(self.Stim2PlotCombo[indx].itemText(0)) == 'All':\r\n self.ReferenceCombo[indx].clear()\r\n self.ReferenceCombo[indx].addItems(['CentNP','Stim','NpExit','RpIn','1stLick'])\r\n self.ReferenceCombo[indx].setCurrentIndex(1)\r\n self.Stim2PlotCombo[indx].clear()\r\n for l in bhv.FindStims(self.Data):\r\n self.Stim2PlotCombo[indx].addItem(self.Data[l]['Descr'], QtCore.QVariant(l))\r\n self.Stim2PlotCombo[indx].setEnabled(True)\r\n self.SortByCombo[indx].setEnabled(True)\r\n self.OutcomeCombo[indx].setEnabled(True)\r\n self.ReferenceCombo[indx].setEnabled(True)\r\n \r\n elif self.PlotTypeCombo[indx].currentText() == 'RT Dist':\r\n if hasattr(self,'Data'):\r\n if not re.search('RT', str(self.ReferenceCombo[indx].currentText())) or self.oldMSN != self.Data['MSN']:\r\n self.ReferenceCombo[indx].clear()\r\n self.ReferenceCombo[indx].addItems(['RT1','RT2','RT3','RT4','RTT'])\r\n self.Stim2PlotCombo[indx].clear()\r\n self.Stim2PlotCombo[indx].addItem('All', QtCore.QVariant('All'))\r\n for k in bhv.FindStims(self.Data):\r\n self.Stim2PlotCombo[indx].addItem(self.Data[k]['Descr'], QtCore.QVariant(k))\r\n self.Stim2PlotCombo[indx].setEnabled(True)\r\n self.SortByCombo[indx].setEnabled(False)\r\n self.OutcomeCombo[indx].setEnabled(False)\r\n self.ReferenceCombo[indx].setEnabled(True)\r\n \r\n elif self.PlotTypeCombo[indx].currentText() == 'Perf Evol':\r\n if str(self.Stim2PlotCombo[indx].itemText(0)) != 'All':\r\n self.Stim2PlotCombo[indx].clear()\r\n self.Stim2PlotCombo[indx].addItem('All', QtCore.QVariant('All'))\r\n for l in bhv.FindStims(self.Data):\r\n self.Stim2PlotCombo[indx].addItem(self.Data[l]['Descr'], QtCore.QVariant(l))\r\n \r\n self.Stim2PlotCombo[indx].setEnabled(True)\r\n self.SortByCombo[indx].setEnabled(False)\r\n self.OutcomeCombo[indx].setEnabled(False)\r\n self.ReferenceCombo[indx].setEnabled(False)\r\n\r\n elif self.PlotTypeCombo[indx].currentText() == 'Later':\r\n self.Stim2PlotCombo[indx].setEnabled(False)\r\n self.SortByCombo[indx].setEnabled(False)\r\n self.ReferenceCombo[indx].setEnabled(False)\r\n self.OutcomeCombo[indx].setEnabled(False)\r\n\r\n def Stim2PlotChanged_Proc(self):\r\n sender = self.sender()\r\n indx = self.Stim2PlotCombo.index(sender)\r\n if re.search('RT1',sender.currentText()):\r\n self.OutcomeCombo[indx].setCurrentIndex(1)\r\n else:\r\n self.OutcomeCombo[indx].setCurrentIndex(0)\r\n \r\n def SetNAxesProc(self):\r\n self.main_fig.figure.clear()\r\n\r\n self.naxes = self.AxNRows.value()*self.AxNCols.value()\r\n \r\n if self.naxes > len(self.ax):\r\n for k in range(len(self.ax), self.naxes):\r\n 
self.PlotTypeTable.insertRow(k)\r\n self.PlotTypeTable.setRowHeight(k,20)\r\n self.PlotTypeCombo.append(QtGui.QComboBox(self))\r\n self.PlotTypeCombo[-1].addItems(['Later', 'RT Dist', 'Perf Evol', 'Behavior'])\r\n self.PlotTypeCombo[-1].currentIndexChanged.connect(self.PlotTypeStatus)\r\n self.PlotTypeTable.setCellWidget(k,0,self.PlotTypeCombo[-1])\r\n\r\n self.Stim2PlotCombo.append(QtGui.QComboBox(self))\r\n if hasattr(self, 'Data'):\r\n for l in bhv.FindStims(self.Data):\r\n self.Stim2PlotCombo[-1].addItem(self.Data[l]['Descr'], QtCore.QVariant(l))\r\n self.PlotTypeTable.setCellWidget(k, 1, self.Stim2PlotCombo[-1])\r\n\r\n self.ReferenceCombo.append(QtGui.QComboBox(self))\r\n self.ReferenceCombo[-1].addItems(['CentNP','Stim','NpExit','RpIn','1stLick'])\r\n self.ReferenceCombo[-1].setCurrentIndex(1)\r\n self.PlotTypeTable.setCellWidget(k,2,self.ReferenceCombo[-1])\r\n \r\n self.OutcomeCombo.append(QtGui.QComboBox(self))\r\n self.OutcomeCombo[-1].addItems(['All','Hits','Error','Miss'])\r\n self.PlotTypeTable.setCellWidget(k,3,self.OutcomeCombo[-1])\r\n\r\n self.SortByCombo.append(QtGui.QComboBox(self))\r\n self.SortByCombo[-1].addItems(['No sort','CentNP','Stim','NpExit','RpIn','1stLick','3rdLick',\r\n 'RT0/CentNP','RT0/Stim','RT0/NpExit','RT0/RpIn','RT0/1stLick','RT0/3rdLick'])\r\n self.PlotTypeTable.setCellWidget(k,4,self.SortByCombo[-1])\r\n \r\n elif self.naxes < len(self.ax):\r\n for k in range(len(self.ax)-1, self.naxes-1, -1):\r\n self.PlotTypeTable.removeRow(k)\r\n self.PlotTypeCombo.pop(k)\r\n self.Stim2PlotCombo.pop(k)\r\n self.SortByCombo.pop(k)\r\n self.OutcomeCombo.pop(k)\r\n self.ReferenceCombo.pop(k)\r\n \r\n self.PlotTypeTable.setVerticalHeaderLabels(['Ax%d' % k for k in range(1,self.naxes+1)])\r\n self.ax = []\r\n for k in range(1,self.naxes+1):\r\n self.ax.append(self.main_fig.figure.add_subplot(self.AxNRows.value(), self.AxNCols.value(),k))\r\n \r\n self.Plot()\r\n\r\n def GetFiles(self):\r\n self.selFiles.clear() \r\n self.regExp = str(self.RegExpEdit.text())\r\n self.ratname = str(self.selRatName.currentText())\r\n files,_= bhv.GetFilenames(self.ratname, self.regExp, self.BhvDir)\r\n if files: self.selFiles.addItems(files)\r\n\r\n def LoadData(self):\r\n self.Data = bhv.LoadBehFile(str(self.selFiles.currentText()))\r\n self.Map = bhv.GetMapping(self.Data)\r\n\r\n def SaveFig(self):\r\n FigName = os.path.split(str(self.selFiles.currentText()))[1]\r\n FigName = FigName.replace('.beh',settings.Format)\r\n self.fig.savefig(os.path.join(settings.ImgDir,FigName),\r\n dpi = settings.dpi)\r\n \r\n def on_key(self, event): \r\n if event.key=='alt': self.NextPlot()\r\n elif event.key=='control': self.PrevPlot()\r\n \r\n def PrevPlot(self):\r\n if self.selFiles.count() > 0 and self.selFiles.currentIndex() > 0:\r\n self.selFiles.setCurrentIndex(self.selFiles.currentIndex()-1)\r\n## self.StBarOldMsg = self.StBar.currentMessage()\r\n## self.StBar.clearMessage()\r\n self.Plot()\r\n else:\r\n pass\r\n## self.StBar.showMessage('You have reached the first file !')\r\n\r\n def NextPlot(self):\r\n c = self.selFiles.count()\r\n if c > 0 and self.selFiles.currentIndex() < c-1:\r\n self.selFiles.setCurrentIndex(self.selFiles.currentIndex()+1)\r\n## self.StBarOldMsg = self.StBar.currentMessage()\r\n## self.StBar.clearMessage()\r\n self.Plot()\r\n else:\r\n pass\r\n## self.StBar.showMessage('You have reached the last file !')\r\n \r\n def Plot(self):\r\n \r\n if self.selFiles.count()>0:\r\n Data = bhv.LoadBehFile(str(self.selFiles.currentText()))\r\n Map = bhv.GetMapping(Data)\r\n stims = 
bhv.FindStims(Data)\r\n self.Data = Data\r\n for j,k in enumerate(self.PlotTypeCombo):\r\n self.PlotTypeStatus(None, j)\r\n self.oldMSN = Data['MSN']\r\n else:\r\n return\r\n \r\n if Map==1:\r\n Resp=['Lick','RpIn']\r\n elif Map==2:\r\n Resp=['Lick','RpIn']\r\n\r\n self.MetaInfoTable.setItem(0, 0, QtGui.QTableWidgetItem(Data['Subject']))\r\n self.MetaInfoTable.setItem(1, 0, QtGui.QTableWidgetItem(Data['File'].split('\\\\')[-1]))\r\n self.MetaInfoTable.setItem(2, 0, QtGui.QTableWidgetItem('%02d/%02d/%02d' % tuple(Data['StartDate'])))\r\n self.MetaInfoTable.setItem(3, 0, QtGui.QTableWidgetItem(Data['MSN']))\r\n self.MetaInfoTable.setItem(4, 0, QtGui.QTableWidgetItem(str(Data['Box'])))\r\n self.MetaInfoTable.setItem(5, 0, QtGui.QTableWidgetItem(str(Data['Group'])))\r\n\r\n self.TWin = [self.TWin1.value(), self.TWin2.value()]\r\n\r\n for k in range(self.naxes):\r\n plottype = str(self.PlotTypeCombo[k].currentText())\r\n stim = str(self.Stim2PlotCombo[k].itemData(self.Stim2PlotCombo[k].currentIndex()).toString())\r\n curax = self.main_fig.figure.axes[k]\r\n sortby = str(self.SortByCombo[k].currentText())\r\n ref = str(self.ReferenceCombo[k].currentText())\r\n curax.cla()\r\n \r\n if self.OutcomeCombo[k].currentText() == 'All':\r\n TS = 'StimTS'\r\n elif self.OutcomeCombo[k].currentText() == 'Hits':\r\n TS = 'HitsTS'\r\n elif self.OutcomeCombo[k].currentText() == 'Error':\r\n TS = 'ErrTS'\r\n elif self.OutcomeCombo[k].currentText() == 'Miss':\r\n TS = 'MissTS'\r\n \r\n if plottype == 'Later':\r\n PlotLATER(Data, ShowFit=False, OutlierCalc = 'mc', ax = curax, ShowEq=0)\r\n curax.set_title('LATER')\r\n if curax.yaxis_inverted():\r\n curax.invert_yaxis()\r\n \r\n elif plottype == 'RT Dist':\r\n if curax.yaxis_inverted():\r\n curax.invert_yaxis()\r\n curax.set_autoscale_on(True)\r\n \r\n if self.Stim2PlotCombo[k].currentText() == 'All':\r\n stims = bhv.FindStims(Data)\r\n else:\r\n stims = [str(self.Stim2PlotCombo[k].currentText())]\r\n whatRT = self.ReferenceCombo[k].currentText()\r\n\r\n for l in stims:\r\n \r\n if whatRT == 'RT1':\r\n RT, _ = bhv.DistXY(Data[l]['StimTS'], Data[l]['NpOut'])\r\n elif whatRT == 'RT2':\r\n RT = Data[l]['RT2']\r\n elif whatRT == 'RT3':\r\n RT = Data[l]['RT3']\r\n elif whatRT == 'RT4':\r\n RT = Data[l]['RT4']\r\n elif whatRT == 'RTT':\r\n RT = Data[l]['RTT']\r\n else:\r\n continue\r\n\r\n color = self.StimColors[int(re.search('[0-9]',l).group())]\r\n if self.RTPlotTypeCombo.currentText()=='Hist StepF':\r\n if RT.size > 3:\r\n curax.hist(RT, self.HistNBins.value(), range=(0, self.HistTWin.value()), lw = 0,\r\n histtype='stepfilled', alpha = 0.8, label = Data[l]['Descr'],\r\n color=color)\r\n else:\r\n curax.plot([], label='_nolegend_')\r\n\r\n elif self.RTPlotTypeCombo.currentText()=='Hist Step':\r\n if RT.size>3:\r\n curax.hist(RT, self.HistNBins.value(), range=(0, self.HistTWin.value()), lw = 3,\r\n histtype='step', label = Data[l]['Descr'],\r\n color=color)\r\n else:\r\n curax.plot([], label='_nolegend_')\r\n\r\n elif self.RTPlotTypeCombo.currentText()=='Hist Cum':\r\n if RT.size>3:\r\n curax.hist(RT, self.HistNBins.value(), cumulative=True, range=(0, self.HistTWin.value()),\r\n lw = 3, histtype='step', label = Data[l]['Descr'], color=color)\r\n else:\r\n curax.plot([], label='_nolegend_')\r\n\r\n \r\n elif self.RTPlotTypeCombo.currentText()=='PDFs':\r\n if RT.size>3:\r\n pdf = gaussian_kde(RT)\r\n x = np.linspace(0, self.HistTWin.value(), 100)\r\n y = pdf(x)\r\n y = y/np.sum(y)\r\n curax.plot(x, y, lw=5, label = Data[l]['Descr'], color=color)\r\n else:\r\n 
curax.plot([], label='_nolegend_')\r\n elif self.RTPlotTypeCombo.currentText()=='CDFs':\r\n if RT.size>3:\r\n pdf = gaussian_kde(RT)\r\n x = np.linspace(0, self.HistTWin.value(), 100)\r\n y = np.cumsum(pdf(x))\r\n y = y/y[-1]\r\n curax.plot(x, y, lw=5, label = Data[l]['Descr'], color=color)\r\n else:\r\n curax.plot([], label='_nolegend_')\r\n \r\n curax.set_title(whatRT)\r\n curax.set_xlim(0, self.HistTWin.value())\r\n curax.set_ylabel('Count')\r\n curax.legend(fancybox=True, prop={'size':10})\r\n curax.grid()\r\n \r\n elif plottype == 'Behavior':\r\n \r\n if ref == 'CentNP':\r\n _, indx = bhv.DistYX(Data[stim][TS], Data[stim]['NpIn'])\r\n Ref = Data[stim]['NpIn'][indx]\r\n elif ref == 'Stim':\r\n Ref = Data[stim][TS]\r\n elif ref == 'NpExit':\r\n _, indx = bhv.DistXY(Data[stim][TS], Data[stim]['NpOut'])\r\n Ref = Data[stim]['NpOut'][indx]\r\n elif ref == 'RpIn':\r\n _, indx = bhv.DistXY(Data[stim][TS], Data[stim][Resp[1]])\r\n Ref = Data[stim][Resp[1]][indx]\r\n elif ref == '1stLick':\r\n _, indx = bhv.DistXY(Data[stim][TS], Data[stim][Resp[0]])\r\n Ref = Data[stim][Resp[0]][indx]\r\n\r\n l = ['No sort','CentNP','Stim','NpExit','RpIn','1stLick','3rdLick'] \r\n\r\n # in case of no sort\r\n if sortby == 'No sort' or ref==sortby:\r\n RT = range(len(Ref))\r\n \r\n elif re.search('CentNP', sortby):\r\n if l.index(ref) < 1:\r\n RT, indx = bhv.DistXY(Ref, Data[stim]['NpIn'])\r\n elif l.index(ref) > 1:\r\n RT, indx = bhv.DistYX(Ref, Data[stim]['NpIn'])\r\n \r\n elif re.search('Stim', sortby):\r\n if l.index(ref) < 2:\r\n RT , indx = bhv.DistXY(Ref, Data[stim]['StimTS'])\r\n elif l.index(ref)>2:\r\n RT , indx = bhv.DistYX(Ref, Data[stim]['StimTS'])\r\n \r\n elif re.search('NpExit', sortby):\r\n if l.index(ref) < 3:\r\n RT, indx = bhv.DistXY(Ref, Data[stim]['NpOut'])\r\n elif l.index(ref) > 3:\r\n RT, indx = bhv.DistYX(Ref, Data[stim]['NpOut'])\r\n\r\n elif re.search('RpIn', sortby):\r\n if l.index(ref) < 4:\r\n RT, indx = bhv.DistXY(Ref, Data[stim][Resp[1]])\r\n elif l.index(ref) > 4:\r\n RT, indx = bhv.DistYX(Ref, Data[stim][Resp[1]])\r\n \r\n elif re.search('1stLick', sortby):\r\n if l.index(ref) < 5:\r\n RT, indx = bhv.DistXY(Ref, Data[stim][Resp[0]])\r\n elif l.index(ref) > 5:\r\n RT, indx = bhv.DistYX(Ref, Data[stim][Resp[0]])\r\n \r\n elif re.search('3rdLick', sortby):\r\n res = bhv.GetHits(Data[stim][TS], Data[stim][Resp[0]])\r\n if l.index(ref) < 6: \r\n RT, indx = bhv.DistXY(Ref, res['ThirdLickHitTS'])\r\n elif l.index(ref) > 6:\r\n RT, indx = bhv.DistYX(Ref, res['ThirdLickHitTS'])\r\n \r\n s = np.argsort(RT)\r\n\t\t\r\n '''if sortby == 'RT0/NpExit':\r\n ipdb.set_trace()'''\r\n \r\n if re.search('RT0', sortby):\r\n RT0, _, _ = bhv.SparseDistance(Data[stim]['StimTS'], Data[stim]['NpIn'],\r\n direction = 'yx')\r\n RT0 = np.round_(RT0, 3)\r\n uRT0 = np.sort(np.unique(RT0))\r\n s=[]\r\n\r\n for k in uRT0:\r\n indx = np.flatnonzero(RT0 == k)\r\n s.extend(indx[np.argsort(RT[indx])])\r\n \r\n yvar = self.YAxesVarCombo.currentText()\r\n \r\n Raster3(Ref[s], Data[stim][Resp[0]], yAxVar=yvar,\r\n TWin=self.TWin, ax=curax, color=[.5,.5,.5], alpha=0.5, lw = 1) \r\n\r\n Raster3(Ref[s], Data[stim]['NpIn'], yAxVar=yvar,\r\n TWin=self.TWin, ax=curax, color='b', lw = 2)\r\n\r\n Raster3(Ref[s], Data[stim]['NpOut'], yAxVar=yvar,\r\n TWin=self.TWin, ax=curax, color='c', lw = 2)\r\n\r\n Raster3(Ref[s], Data[stim][Resp[1]], yAxVar=yvar,\r\n TWin=self.TWin, ax=curax, color='g', lw = 2)\r\n \r\n if Data[stim].has_key('Solnd'):\r\n Raster3(Ref[s], Data[stim]['Solnd'], yAxVar=yvar,\r\n TWin=self.TWin, ax=curax, 
color='r', lw=2)\r\n\r\n Raster3(Ref[s], Data[stim]['StimTS'], yAxVar=yvar,\r\n TWin=self.TWin, ax=curax, color='k', lw=2)\r\n\r\n curax.set_title(Data[stim]['Descr'])\r\n\r\n if yvar == 'Trial':\r\n curax.set_ylim(len(Ref),0)\r\n curax.set_ylabel('Trial No')\r\n elif yvar == 'Time':\r\n curax.set_ylim(Ref[-1],0)\r\n curax.set_ylabel('Session Time (min)')\r\n curax.set_yticks(range(0,3700,600))\r\n curax.set_yticklabels(range(0,70,10))\r\n \r\n elif plottype=='Perf Evol':\r\n '''Performs a circular convolution to obtain the hit rate'''\r\n if curax.yaxis_inverted(): curax.invert_yaxis()\r\n \r\n ktype = str(self.KernelType.currentText())\r\n ksize = self.KernelSize.value()\r\n \r\n if ktype == 'Hamming':\r\n kernel = np.hamming(ksize)\r\n elif ktype == 'Square':\r\n kernel = np.ones(ksize)\r\n\r\n if self.Stim2PlotCombo[k].currentText() == 'All':\r\n stims = bhv.FindStims(Data)\r\n else:\r\n stims = [self.Stim2PlotCombo[k].itemData(self.Stim2PlotCombo[k].currentIndex()).toString()]\r\n \r\n for l in stims:\r\n color = self.StimColors[int(re.search('[0-9]',l).group())]\r\n \r\n if self.Data[l].has_key('HitsTS'):\r\n if self.Data[l]['HitsTS'].any() and self.Data[l]['HitsTS'].shape:\r\n TS = np.int32(np.round(self.Data[l]['HitsTS']))\r\n a = np.zeros(TS[-1]+1)\r\n a[TS]=1\r\n b = np.concatenate((kernel, np.zeros(len(a)-len(kernel)))) # pad with zeros\r\n resp = np.fft.ifft(np.fft.fft(a) * np.fft.fft(b)).real # perform the multiplication of the fourier transforms\r\n time = np.arange(len(a))/60.0\r\n resp = 60*resp/sum(kernel)\r\n if time[-1] < 60:\r\n time = np.append(time, time[-1]+0.1)\r\n time = np.append(time, 60)\r\n resp = np.append(resp, 0)\r\n resp = np.append(resp, 0)\r\n curax.plot(time, resp, color=color, lw=3, label=self.Data[l]['Descr']) # plot\r\n else:\r\n curax.plot(range(60), np.zeros(60), color=color, lw=3, label=self.Data[l]['Descr'])\r\n \r\n curax.grid(axis='y')\r\n curax.legend(loc=0, fancybox=True, prop={'size':10})\r\n curax.set_title('Performance')\r\n curax.set_xlim(0, 61)\r\n curax.set_xlabel('Time (min)')\r\n curax.set_ylabel('Resp/min')\r\n\r\n self.main_fig.figure.tight_layout()\r\n self.main_fig.figure.canvas.draw()\r\n\r\n if self.SaveFigCheck.checkState()>0:\r\n self.SaveFig()\r\n \r\n########################################################################################################################\r\n\r\nclass LearningCurveGUI(QtGui.QWidget):\r\n \r\n def __init__(self, BhvDir = ''):\r\n self.BhvDir = BhvDir\r\n QtGui.QWidget.__init__(self)\r\n self.setWindowTitle(\"Learning Curve Explorer\")\r\n \r\n mainLay = QtGui.QHBoxLayout(self)\r\n \r\n splitter1 = QtGui.QSplitter(QtCore.Qt.Horizontal)\r\n frame1 = QtGui.QFrame()\r\n frame1.setFrameStyle(QtGui.QFrame.StyledPanel)\r\n \r\n vLayFrame1 = QtGui.QVBoxLayout(frame1)\r\n \r\n # regular expression combo box\r\n grp = QtGui.QGroupBox('Search Files')\r\n vLay = QtGui.QVBoxLayout(grp)\r\n # rat name combobox \r\n self.RatNameCombo = QtGui.QComboBox(self)\r\n self.RatNameCombo.addItems(bhv.GetRatNames(pth = BhvDir))\r\n self.RatNameCombo.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)\r\n hLay = QtGui.QHBoxLayout()\r\n hLay.setMargin(0)\r\n hLay.addWidget(QtGui.QLabel('Select Rat Name'))\r\n hLay.addWidget(self.RatNameCombo)\r\n vLay.addLayout(hLay)\r\n \r\n self.RegExpText = QtGui.QTextEdit()\r\n self.defaultRegExp = 'NP0[0-4](A)?_(RSip|LSip|CentWht|CWht|[0-9]{1,2}K|NOISE|CLICK)R_(RSip|LSip|CentWht|CWht|[0-9]{1,2}K|NOISE|CLICK)L'\r\n self.RegExpText.setText(self.defaultRegExp)\r\n 
self.RegExpText.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)\r\n self.RegExpText.setMaximumHeight(60)\r\n vLay.addWidget(self.RegExpText)\r\n # add a 'set default reg exp' button\r\n self.setDefaultRegExpBtn = QtGui.QPushButton('Reset Reg Exp')\r\n self.setDefaultRegExpBtn.clicked.connect(self.setDefaultRegExpProc)\r\n vLay.addWidget(self.setDefaultRegExpBtn)\r\n \r\n self.dateCheck = QtGui.QCheckBox('Include Sessions After:')\r\n self.dateEdit = QtGui.QDateEdit()\r\n self.dateEdit.setDate(QtCore.QDate(2011,1,1))\r\n hLay = QtGui.QHBoxLayout()\r\n hLay.setMargin(0)\r\n hLay.addWidget(self.dateCheck)\r\n hLay.addWidget(self.dateEdit)\r\n vLay.addLayout(hLay)\r\n \r\n self.searchFilesBtn = QtGui.QPushButton('Search Files')\r\n self.searchFilesBtn.clicked.connect(self.searchFilesProc)\r\n self.searchFilesBtn.setStyleSheet('QPushButton{background-color: rgba(243,134,48)}')\r\n vLay.addWidget(self.searchFilesBtn)\r\n \r\n \r\n self.filesTable = QtGui.QTableWidget(0, 1)\r\n self.filesTable.verticalHeader().setVisible(False)\r\n self.filesTable.setHorizontalHeaderLabels(['Include Files'])\r\n self.filesTable.setColumnWidth(0, 300)\r\n #self.filesTable.horizontalHeader().setVisible(False)\r\n self.filesTable.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)\r\n vLay.addWidget(self.filesTable)\r\n \r\n '''self.selectBtn = QtGui.QPushButton('Include All')\r\n self.selectBtn.setCheckable(True)\r\n self.selectBtn.clicked.connect(self.selectProc)\r\n\r\n self.selectInvertBtn = QtGui.QPushButton('Toggle Sel')\r\n self.selectInvertBtn.clicked.connect(self.selectInvertProc)\r\n hLay = QtGui.QHBoxLayout()\r\n hLay.setMargin(0)\r\n hLay.addWidget(self.selectBtn)\r\n hLay.addWidget(self.selectInvertBtn)\r\n vLay.addLayout(hLay)'''\r\n \r\n vLayFrame1.addWidget(grp)\r\n \r\n # include RTs checkbox\r\n grp = QtGui.QGroupBox('Plot options')\r\n \r\n grpLay = QtGui.QGridLayout(grp)\r\n \r\n # Raw vs proportion plot\r\n self.PlotOptionCheck = QtGui.QCheckBox('Raw / Proportion')\r\n self.PlotOptionCheck.setChecked(True)\r\n grpLay.addWidget(self.PlotOptionCheck, 0, 0, 1, 4)\r\n \r\n self.IncRTCheck = []\r\n \r\n self.IncRTCheck.append(QtGui.QCheckBox('RT1'))\r\n self.IncRTCheck[-1].setChecked(True)\r\n grpLay.addWidget(self.IncRTCheck[-1], 1, 0)\r\n \r\n self.IncRTCheck.append(QtGui.QCheckBox('RT2'))\r\n self.IncRTCheck[-1].setChecked(True)\r\n grpLay.addWidget(self.IncRTCheck[-1], 1, 1)\r\n \r\n self.IncRTCheck.append(QtGui.QCheckBox('RT3'))\r\n self.IncRTCheck[-1].setChecked(True)\r\n grpLay.addWidget(self.IncRTCheck[-1], 1, 2)\r\n \r\n self.IncRTCheck.append(QtGui.QCheckBox('RT4'))\r\n self.IncRTCheck[-1].setChecked(True)\r\n grpLay.addWidget(self.IncRTCheck[-1], 1, 3)\r\n \r\n # add a plot button\r\n self.PlotBtn = QtGui.QPushButton('Plot')\r\n self.PlotBtn.clicked.connect(self.Plot)\r\n self.PlotBtn.setStyleSheet('QPushButton{background-color: rgba(0,190,0)}')\r\n grpLay.addWidget(self.PlotBtn, 2, 0, 1, 2)\r\n \r\n self.saveFigBtn = QtGui.QPushButton('Save Figure')\r\n self.saveFigBtn.clicked.connect(self.saveFigProc)\r\n self.saveFigBtn.setStyleSheet('QPushButton{background-color: rgba(8,107,154)}')\r\n grpLay.addWidget(self.saveFigBtn, 2, 2, 1, 2)\r\n \r\n vLayFrame1.addWidget(grp)\r\n #vLayFrame1.addStretch(1)\r\n \r\n # add a settings button\r\n self.settingsBtn = QtGui.QPushButton('Settings')\r\n self.settingsBtn.clicked.connect(self.settingsProc)\r\n vLayFrame1.addWidget(self.settingsBtn)\r\n \r\n splitter1.addWidget(frame1) \r\n \r\n frame2 = 
QtGui.QFrame()\r\n frame2.setFrameStyle(QtGui.QFrame.StyledPanel)\r\n vLayFrame2 = QtGui.QVBoxLayout(frame2)\r\n vLayFrame2.setMargin(0)\r\n vLayFrame2.setSpacing(0)\r\n \r\n # add a figure and a toolbar\r\n self.MainFig = MplWidget()\r\n self.Ntb = NavToolbar(self.MainFig, self)\r\n self.Ntb.setIconSize(QtCore.QSize(15,15))\r\n #vLayFrame2.addLayout(gLay)\r\n vLayFrame2.addWidget(self.MainFig)\r\n vLayFrame2.addWidget(self.Ntb)\r\n \r\n splitter1.addWidget(frame2)\r\n \r\n mainLay.addWidget(splitter1)\r\n # if running in linux set a certain style for the buttons and widgets\r\n if sys.platform == 'linux2':\r\n QtGui.QApplication.setStyle(QtGui.QStyleFactory.create('Plastique'))\r\n\r\n # finally show the entire gui \r\n self.show()\r\n \r\n def setDefaultRegExpProc(self):\r\n self.RegExpText.setText(self.defaultRegExp)\r\n \r\n def selectProc(self):\r\n if self.filesTable.rowCount() == 0:\r\n return\r\n \r\n state = self.selectBtn.isChecked()\r\n \r\n if state:\r\n self.selectBtn.setText('All Selected')\r\n else:\r\n self.selectBtn.setText('None Selected')\r\n \r\n for n in self.includeFileChecks:\r\n n.setChecked(state)\r\n \r\n def selectInvertProc(self):\r\n if self.filesTable.rowCount() == 0:\r\n return\r\n \r\n for n in self.includeFileChecks:\r\n n.setChecked(not n.checkState())\r\n \r\n def searchFilesProc(self):\r\n \r\n from datetime import date\r\n \r\n RatName = str(self.RatNameCombo.currentText())\r\n Task = str(self.RegExpText.toPlainText())\r\n self.files, MSN = bhv.GetFilenames(RatName, Task, self.BhvDir)\r\n \r\n for n in reversed(range(self.filesTable.rowCount())):\r\n self.filesTable.removeRow(n)\r\n \r\n # delete checkboxes\r\n if hasattr(self, 'includeFileChecks') and len(self.includeFileChecks) > 0:\r\n for n in self.includeFileChecks:\r\n n.deleteLater()\r\n \r\n self.includeFileChecks = []\r\n \r\n if self.dateCheck.checkState():\r\n dateRef = self.dateEdit.date().toPyDate()\r\n else:\r\n dateRef = date(2000,1,1)\r\n \r\n for n, f in enumerate(self.files):\r\n self.filesTable.insertRow(n) \r\n f = os.path.split(f)[1]\r\n self.includeFileChecks.append(QtGui.QCheckBox(f))\r\n self.includeFileChecks[-1].setFont(QtGui.QFont('', pointSize=8))\r\n self.filesTable.setCellWidget(n,0, self.includeFileChecks[-1])\r\n self.filesTable.setRowHeight(n, 20)\r\n \r\n dateStr = re.search('[0-9]{6}', f).group()\r\n dateFile = date(int(dateStr[0:2]) + 2000, int(dateStr[2:4]), int(dateStr[4:]))\r\n \r\n if dateFile >= dateRef:\r\n self.includeFileChecks[-1].setChecked(True) \r\n \r\n self.filesTable.setAlternatingRowColors(True)\r\n \r\n def saveFigProc(self):\r\n fname = str(QtGui.QFileDialog.getSaveFileName(caption = 'Save a copy of the figure ...'))\r\n \r\n if fname:\r\n self.MainFig.figure.savefig(fname, dpi = 300, format = 'svg')\r\n \r\n def settingsProc(self):\r\n if settings.edit():\r\n pass\r\n \r\n def Plot(self):\r\n \r\n if self.filesTable.rowCount() == 0:\r\n self.searchFilesProc()\r\n \r\n RatName = str(self.RatNameCombo.currentText())\r\n Task = str(self.RegExpText.toPlainText())\r\n files, MSN = bhv.GetFilenames(RatName, Task, self.BhvDir)\r\n CalcType = self.PlotOptionCheck.checkState()\r\n IncRTs = self.IncRTCheck[0].checkState()\r\n \r\n files = []\r\n for k in self.includeFileChecks:\r\n if k.checkState():\r\n files.append(os.path.join(self.BhvDir, str(k.text())))\r\n \r\n # create dictionaries to hold hits, miss and error, and colors\r\n keys = ['Tone1', 'Tone2', 'RSipLight', 'LSipLight', 'WhtFLight', 'Catch']\r\n col = ['b', 'g', 'b', 'g', 'r', [.5, .5, .5]]\r\n 
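# keys and col are zipped just below so each stimulus description keeps a\r\n # fixed plotting colour (e.g. 'Tone1' -> 'b', 'Catch' -> grey); extend the\r\n # two lists together whenever a new stimulus type is added.\r\n 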
tmp = {}; colors = {} \r\n for c, k in zip(col, keys):\r\n tmp[k] = []\r\n colors[k] = c\r\n\r\n # create a dictionary for every type of reaction time\r\n RTs = ['RT1','RT2','RT3']\r\n RTDict = {}\r\n for k in RTs: RTDict[k] = {}\r\n \r\n # create lists to hold data\r\n MSN = []; dates = []; R = []; L = []\r\n \r\n if RatName == '':\r\n initialDate = dtm(year = 2012, month = 11, day = 29)\r\n exclude = [dtm(2012, 12, 13), dtm(2013, 4, 1)]\r\n else:\r\n initialDate = dtm(1,1,1)\r\n exclude = []\r\n \r\n # iterate over files to extract the information\r\n for j,k in enumerate(files):\r\n \r\n Data = bhv.LoadBehFile(k) # load the data\r\n Stims = bhv.FindStims(Data) # find stimuli\r\n date = Data['StartDate'] # get session date and transform it to a date format\r\n date = dtm(year = date[2]+2000, month = date[0], day = date[1])\r\n \r\n # this is if one wants to exclude some sessions, or include sessions\r\n # after a certain date\r\n if date < initialDate or date in exclude: continue\r\n \r\n # store date and training protocol\r\n dates.append(Data['StartDate'])\r\n MSN.append(Data['MSN'])\r\n \r\n # iterate over stimuli to extract RTs and performance info\r\n for m, n in enumerate(Stims):\r\n \r\n # get the number of errors\r\n if Data[n].has_key('ErrTS'):\r\n eSize = np.array(Data[n]['ErrTS']).size\r\n else:\r\n eSize = 0\r\n \r\n if Data[n]['Descr'] not in tmp:\r\n tmp[Data[n]['Descr']] = []\r\n # append the number of hits, miss and errors\r\n tmp[Data[n]['Descr']].append([j,\r\n np.array(Data[n]['HitsTS']).size,\r\n eSize,\r\n np.array(Data[n]['MissTS']).size])\r\n \r\n # Extract RTs\r\n # create a key inside each dictionary if it doesn't exist\r\n key = Data[n]['Descr']\r\n for r in RTs:\r\n if not RTDict[r].has_key(key):\r\n RTDict[r][key] = []\r\n \r\n # create a key to hold the session number for a particular RT\r\n if not RTDict[r].has_key('x_'+key):\r\n RTDict[r]['x_'+key] = []\r\n \r\n if key != 'Catch':\r\n # get the reaction times\r\n # this is to eliminate weird RTs coming from a bug in my Med-PC code\r\n Data[n][r] = np.array(Data[n][r])\r\n if Data[n][r].size > 1 and np.any( Data[n][r] > 0.3 ):\r\n RT = Data[n][r][ Data[n][r] < 3.0 ]\r\n else:\r\n RT = Data[n][r]\r\n else:\r\n RT, x,y = bhv.SparseDistance(Data[n]['StimTS'], Data[n]['NpOut'])\r\n \r\n RTDict[r][key].append(RT)\r\n \r\n # get the session number\r\n RTDict[r]['x_'+key].append(j)\r\n\r\n \r\n # get the catch trials for the left and the right\r\n lCatch = [k for k in bhv.FindStims(Data) if Data[k]['Descr'] == 'Catch']\r\n if len(lCatch) > 0:\r\n n = lCatch[0]\r\n RLick = Data['EventTS'][np.flatnonzero(Data['EventName'] == 'RightLickOn')][0]\r\n LLick = Data['EventTS'][np.flatnonzero(Data['EventName'] == 'LeftLickOn')][0]\r\n \r\n R.append([j,\r\n Data[n]['StimTS'].size,\r\n bhv.GetHits(Data[n]['StimTS'], RLick)['StimHitsIndx'].size])\r\n L.append([j,\r\n Data[n]['StimTS'].size,\r\n bhv.GetHits(Data[n]['StimTS'], LLick)['StimHitsIndx'].size])\r\n else:\r\n R.append([j, np.nan, np.nan])\r\n L.append([j, np.nan, np.nan])\r\n \r\n \r\n #v = 1000\r\n for k in tmp.keys():\r\n tmp[k] = np.array(tmp[k])\r\n ''' if tmp[k].size > 0 and v > tmp[k][:,0].min():\r\n v = tmp[k][:,0].min()'''\r\n \r\n for k in tmp.keys():\r\n if tmp[k].size > 0:\r\n tmp[k][:,0] = tmp[k][:,0]# - v\r\n \r\n R = np.array(R); R[:,0] = R[:,0]#-v\r\n L = np.array(L); L[:,0] = L[:,0]#-v\r\n \r\n # set some parameters\r\n s = range(len(dates)); lw = 6; ms = 10; a = 0.7\r\n \r\n # clear the figure and create new axes\r\n fig = self.MainFig.figure\r\n 
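# reuse the embedded Matplotlib figure: wipe it and rebuild the axes so that\r\n # repeated clicks on Plot do not stack new curves on top of stale axes.\r\n 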
fig.clf()\r\n ax = fig.add_subplot(111)\r\n \r\n \r\n # change the labels to reflect the stimulus - outcome association \r\n for k in keys[0:-1]:\r\n t = tmp[k]\r\n if not np.all(np.isnan(t)):\r\n if k == 'Tone1':\r\n label = k+'--> Right'\r\n elif k == 'Tone2':\r\n label = k+'--> Left'\r\n elif k == 'WhtFLight':\r\n label = k+'--> Right'\r\n else:\r\n label = k\r\n \r\n if CalcType:\r\n ax.plot(t[:,0], t[:,1]/np.float32((t[:,1]+t[:,2])+t[:,3]),'-o',\r\n ms=ms, mew = 0, lw=lw, alpha=a, label=label)\r\n else:\r\n ax.plot(t[:,0], t[:,1],'-o',\r\n ms=ms, mew = 0, lw=lw, alpha=a, label=label)\r\n\r\n \r\n # plot the catch trials as a proportion\r\n if CalcType:\r\n \r\n ax.plot(R[:,0], R[:,2]/np.float32(R[:,1]), '--o', color = 'b',\r\n ms=ms, mew = 0, lw=lw, alpha=a, label='catch R')\r\n ax.plot(L[:,0], L[:,2]/np.float32(L[:,1]), '--o', color = 'g',\r\n ms=ms, mew = 0, lw=lw, alpha=a, label='catch L')\r\n \r\n # ... or as raw data\r\n else:\r\n ax.plot(R[:,0], R[:,2], '--o', color = 'b',\r\n ms=ms, mew = 0, lw=lw, alpha=a, label='catch R')\r\n ax.plot(L[:,0], L[:,2], '--o', color = 'g',\r\n ms=ms, mew = 0, lw=lw, alpha=a, label='catch L')\r\n \r\n # put a dotted line before the first session of the white light\r\n if not np.all(np.isnan(tmp['WhtFLight'])):\r\n ax.axvline(tmp['WhtFLight'][0][0]-0.5, color='k', alpha=0.5, linestyle='--', lw=3)\r\n \r\n \r\n # title and labels\r\n ax.set_title('%s %s' % (RatName, Task))\r\n ax.set_xlabel('Session Date')\r\n \r\n # y label for proportion of trials\r\n if CalcType:\r\n ax.set_ylabel('Proportion of Trials')\r\n ax.set_ylim(-0.05, 1.05)\r\n ax.axhline(0.5, 0, 10, lw = 2, linestyle = '--', color='k', alpha = 0.5)\r\n ax.axhline(1.0, 0, 10, lw = 2, linestyle = '--', color='k', alpha = 0.5)\r\n ax.axhline(0.0, 0, 10, lw = 2, linestyle = '--', color='k', alpha = 0.5)\r\n else:\r\n ax.set_ylabel('Number of Trials')\r\n \r\n # set the x axis tick marks to be the training dates\r\n ax.set_xticks(s)\r\n ax.set_xticklabels(['%02d-%02d-%d' % (k[0],k[1],k[2]) for k in dates],\r\n rotation = 30, horizontalalignment = 'right', fontsize = 9)\r\n \r\n # iterate over tick labels, if Monday change to bold red\r\n for j,k in zip(ax.get_xmajorticklabels(), dates):\r\n if dtm(k[2],k[0],k[1]).isoweekday() == 1: # check whether it is a Monday\r\n j.set_color('r')\r\n j.set_weight('bold')\r\n\r\n \r\n # if selected, plot RT1 for each stimulus\r\n # ... I should add an option to select different RTs ...\r\n if IncRTs:\r\n ax2 = ax.twinx() # create a second y axis\r\n r = 'RT1'\r\n for k in RTDict[r]:\r\n if k.find('x_') != 0:\r\n x = RTDict[r]['x_'+k]# - v\r\n ax2.plot(x ,[np.array(n).mean() for n in RTDict[r][k]],\r\n color = colors[k], lw = 3, label = k)\r\n ci = np.array([bootci(n, 100) for n in RTDict[r][k]])\r\n ax2.fill_between(x, ci[:,0], ci[:,1], alpha = 0.5, color = colors[k])\r\n ax2.set_ylabel('Time (sec)')\r\n ax2.set_ylim(0, 1.0)\r\n \r\n # grid and limits\r\n ax.grid(axis = 'both')\r\n ax.set_xlim(-1, s[-1]+1)\r\n \r\n # make the first axes' background transparent and raise it\r\n #ax.set_axis_bgcolor('none')\r\n #ax.set_zorder(10)\r\n \r\n # handler for the legend. 
The legend corresponds to the first axes\r\n #leg = ax.legend(loc='lower left', shadow=True, prop={'size':12}, fancybox = True)\r\n #leg.set_zorder(100)\r\n #leg.draggable()\r\n \r\n fig.tight_layout()\r\n fig.canvas.draw()\r\n \r\n######################################################################################################################## \r\n\r\n\r\ndef BilateralTask(pth):\r\n\r\n import datetime as dtime\r\n \r\n ratNames = bhv.GetRatNames(prefix = 'HMV', pth = pth)\r\n\r\n class SetParams(dt.DataSet):\r\n RatName = di.ChoiceItem('Choose a Rat', ratNames )\r\n Task = di.StringItem('Regular Expr', 'UNBIAS|6KR_9KL|2T')\r\n PlotMSN = di.BoolItem('PlotMSN?').set_pos(col=0)\r\n PlotEq = di.BoolItem('Plot Equations?').set_pos(col=1)\r\n SaveFig = di.BoolItem('Save Figure', default=False)\r\n BiasIndx = di.ChoiceItem('Discr Indx', [(1,'Simple'),(2,'Complex'),])\r\n \r\n Params=SetParams()\r\n if Params.edit()==1:\r\n RatName = ratNames[Params.RatName]\r\n Task = Params.Task\r\n files, MSN = bhv.GetFilenames(RatName, RegExp=Task, BhvDir = pth)\r\n\r\n Dates=[]\r\n # build a dictionary to hold all the data\r\n Stims = {}; Stims['Stim0'] = {}; Stims['Stim1'] = {}\r\n params = ['mRTT', 'mRT1', 'mRT2', 'mRT3', 'eRTT', 'eRT1', 'eRT2', 'eRT3', 'n']\r\n for p in params:\r\n Stims['Stim0'][p] = []\r\n Stims['Stim1'][p] = []\r\n\r\n # iterate over files\r\n for k in files:\r\n Data = bhv.LoadBehFile(k)\r\n Dates.append(np.int32(Data['StartDate']))\r\n if not bhv.FindStims(Data):\r\n Data = bhv.GetBhvParams(Data)\r\n\r\n for s in ['Stim0', 'Stim1']:\r\n if Data.has_key(s):\r\n for p in params:\r\n if p == 'n':\r\n if Data[s].has_key('RTTe'):\r\n rE = Data[s]['RTTe'].size\r\n else:\r\n rE = 0\r\n Stims[s][p].append([Data[s]['RTT'].size, rE])\r\n if Data[s]['RTT'].size > 1:\r\n if p[0] == 'm':\r\n Stims[s][p].append(Data[s][p[1:]].mean())\r\n elif p[0] =='e':\r\n Stims[s][p].append(bootci(Data[s][p[1:]], 100))\r\n else:\r\n if p[0] == 'm':\r\n Stims[s][p].append(0)\r\n elif p[0] =='e':\r\n Stims[s][p].append([0,0])\r\n\r\n # transform everything into an array\r\n for p in params:\r\n Stims['Stim0'][p] = np.array(Stims['Stim0'][p])\r\n Stims['Stim1'][p] = np.array(Stims['Stim1'][p])\r\n\r\n mn = ['mRT1','mRT2','mRT3']\r\n er = ['eRT1','eRT2','eRT3']\r\n for m, e in zip(mn,er):\r\n Stims['Stim0'][e] = [Stims['Stim0'][m]-Stims['Stim0'][e][:,0]\\\r\n ,Stims['Stim0'][e][:,1]-Stims['Stim0'][m]]\r\n Stims['Stim1'][e] = [Stims['Stim1'][m]-Stims['Stim1'][e][:,0]\\\r\n ,Stims['Stim1'][e][:,1]-Stims['Stim1'][m]]\r\n\r\n # Calculate task dates and MSNs\r\n for j, k in enumerate(MSN):\r\n if re.search('(?<=NP04A_).*',k):\r\n MSN[j]=re.search('(?<=NP04A_).*', k).group()\r\n\r\n Dates = np.array(Dates)\r\n Mondays = np.array([], dtype = np.int32)\r\n\r\n # determine if that date is a Monday\r\n for j,k in enumerate(Dates):\r\n if dtime.date(k[2],k[0],k[1]).isoweekday()==1:\r\n Mondays = np.append(Mondays, j)\r\n \r\n Dates2 = ['%02d/%02d' % (k[0],k[1]) for k in Dates]\r\n \r\n gAlpha = 0.6; orig = 0; a = 0.6; h = 0.6; ErrLine = 3\r\n col = [ [0,.2,.7] , [0,.4,0] , [.8,.1,.1] ]\r\n yl = [-1, max([Stims['Stim0']['mRTT'].size, Stims['Stim1']['mRTT'].size])]\r\n\r\n plt.figure()\r\n plt.subplot(2,2,1) # assumed first panel of the 2x2 grid that the sections below fill in\r\n x = np.arange(1, Stims['Stim0']['mRT1'].size+1)\r\n for m, e, c in zip(mn, er, col):\r\n plt.barh(x, Stims['Stim0'][m], left=orig, height=h, xerr=Stims['Stim0'][e],\r\n color=c, alpha=a, align='center', edgecolor='', label = m,\r\n error_kw={'elinewidth':ErrLine,'ecolor':c})\r\n orig = orig+Stims['Stim0'][m]\r\n\r\n x = np.arange(1, 
Stims['Stim1']['mRT1'].size+1)\r\n orig = 0\r\n for m, e, c in zip(mn, er, col):\r\n plt.barh(x, -Stims['Stim1'][m], left=orig, height=h, xerr=Stims['Stim1'][e],\r\n color=c, alpha=a, align='center', edgecolor='', label = '_nolegend_',\r\n error_kw={'elinewidth':ErrLine,'ecolor':c})\r\n orig = orig-Stims['Stim1'][m]\r\n\r\n plt.plot(np.zeros(len(x[Mondays])),x[Mondays],'o',color=[1,1,0], label='Monday')\r\n plt.legend(fancybox = True)#, mode='expand', ncol=4, prop = {'size':9},loc=8)\r\n \r\n plt.title(RatName+' '+Task)\r\n xl = 1.25*(max(max(Stims['Stim0']['mRTT']), max(Stims['Stim1']['mRTT'])))\r\n plt.xlim(-xl,xl) \r\n\r\n # Draw a textbox with the name of the task\r\n if Params.PlotMSN == 1:\r\n bp = dict(boxstyle=\"round\", alpha=0.7, fc='w', ec=[.5,.5,.5])\r\n for j,k in enumerate(MSN):\r\n plt.text(-xl+0.1, x[j], k, va='center', fontsize=7, bbox=bp)\r\n \r\n # Add 'L' and 'R' arrows\r\n bp = dict(boxstyle=\"LArrow\", alpha=0.7, fc='w', ec=[.5,.5,.5])\r\n plt.text(-0.85*xl, 0.5+yl[1]/2.0, 'L', ha='center', va='center', fontsize=15, bbox=bp)\r\n bp['boxstyle']=\"RArrow\"\r\n plt.text( 0.85*xl, 0.5+yl[1]/2.0, 'R', ha='center', va='center', fontsize=15, bbox=bp)\r\n plt.grid(alpha = gAlpha)\r\n plt.ylim(0, len(x)+1)\r\n plt.ylabel('Session Date')\r\n plt.yticks(x, Dates2, fontsize=8)\r\n plt.xlabel('Time (sec)')\r\n\r\n #######################################################################################################\r\n ## Plots the number of trials for corrects, incorrects for each session\r\n\r\n # per-session [corrects, errors] counts; assumed to be the 'n' arrays built\r\n # above (right responses from Stim0, left responses from Stim1)\r\n nR = Stims['Stim0']['n']\r\n nL = Stims['Stim1']['n']\r\n col=[[0,.2,.7],[0,.4,0],[.8,.1,.1]]\r\n a=0.8\r\n\r\n plt.subplot(2,2,2)\r\n\r\n x = np.arange(1, nR.shape[0]+1)\r\n b1 = plt.barh(x, nR[:,0], height=h, left=0, align='center', color=col[0], alpha=a)\r\n b2 = plt.barh(x, nR[:,1], height=h, left=nR[:,0], align='center', color=col[2], alpha=a)\r\n\r\n x = np.arange(1, nL.shape[0]+1)\r\n b1 = plt.barh(x, -nL[:,0], height=h, left=0, align='center', color=col[0], alpha=a)\r\n b2 = plt.barh(x, -nL[:,1], height=h, left=-nL[:,0], align='center', color=col[2], alpha=a)\r\n\r\n plt.plot([0,0],[-1,yl[1]+2],'k',linewidth=2)\r\n p1, = plt.plot(np.zeros(len(x[Mondays])), x[Mondays], 'o', color=[1,1,0])\r\n\r\n plt.legend([b1,b2,p1],['Corrects','Incorrects','Monday'],mode='expand',\r\n loc=8, ncol=3, fancybox=True, prop={'size':9})\r\n\r\n plt.title('Number of trials for '+RatName+' '+Task, fontdict={'fontsize':10})\r\n plt.xlabel('Number of trials')\r\n plt.yticks(x, '')\r\n plt.grid(alpha = gAlpha)\r\n plt.tight_layout()\r\n xl = max(np.append(np.sum(nR,axis=1), np.sum(nL,axis=1)))\r\n\r\n plt.ylim(yl)\r\n plt.xlim(-1.1*xl,1.1*xl)\r\n\r\n # Add 'L' and 'R' arrows\r\n bp = dict(boxstyle=\"LArrow\", alpha=0.7, fc='w', ec=[.5,.5,.5])\r\n plt.text(-0.85*1.1*xl, 0.5+yl[1]/2., 'L', ha='center', va='center', fontsize=15, bbox=bp)\r\n bp = dict(boxstyle=\"RArrow\", alpha=0.7, fc='w', ec=[.5,.5,.5])\r\n plt.text( 0.85*1.1*xl, 0.5+yl[1]/2., 'R', ha='center', va='center', fontsize=15, bbox=bp)\r\n\r\n #######################################################################################################\r\n ## Plots the discriminability index\r\n\r\n plt.subplot(2,2,3)\r\n\r\n # cast to float so Python 2 integer division does not truncate the ratio\r\n dR = nR[:,0]/np.float32(nR[:,0]+nR[:,1])\r\n dL = nL[:,0]/np.float32(nL[:,0]+nL[:,1])\r\n\r\n x = np.arange(1, max([nR.shape[0], nL.shape[0]])+1)\r\n b1, = plt.plot(dR, range(1,dR.size+1), linewidth=2, color=col[0], marker='o', mec=col[0], ms=6)\r\n b2, = plt.plot(dL, range(1,dL.size+1), linewidth=2, color=col[1], marker='o', mec=col[1], ms=6)\r\n p1, = plt.plot(np.zeros(len(x[Mondays])), x[Mondays], 'o', 
color=[1,1,0])\r\n\r\n plt.legend([b2,b1,p1], ['Left','Right','Monday'], mode='expand',\r\n ncol=3, loc=8, fancybox=True, prop={'size':9})\r\n\r\n if Params.PlotEq:\r\n bp = dict(boxstyle=\"round\", alpha=0.7, fc='w', ec=[.5,.5,.5])\r\n plt.text(0.5, yl[1]*0.9, r'$\\frac{Corrects}{Corrects+Incorrects}$',\r\n ha='center', va='center', fontsize=14, bbox=bp)\r\n\r\n if Params.PlotMSN == 1:\r\n bp = dict(boxstyle=\"round\", alpha=0.7, fc='w', ec=[.5,.5,.5])\r\n for j,k in enumerate(MSN):\r\n plt.text(0.025,x[j],k,va='center',fontsize=7, bbox=bp)\r\n\r\n plt.xlabel('Discriminability Index')\r\n plt.xlim(-0.1,1.1)\r\n plt.ylim(yl)\r\n plt.ylabel('Session Date')\r\n plt.yticks(x,Dates2,fontsize=8)\r\n plt.xticks([0.0, 0.2, 0.4, 0.6, 0.8, 1.0] , [0.0, 0.2, 0.4, 0.6, 0.8, 1.0])\r\n plt.grid(alpha = gAlpha)\r\n\r\n #######################################################################################################\r\n ## Plots the bias index\r\n\r\n plt.subplot(2,2,4)\r\n\r\n if Params.PlotEq:\r\n if Params.BiasIndx==2:\r\n eq=r'$\\frac{(Cor_{Right}-Inc_{Right})-(Cor_{Left}-Inc_{Left})}{Cor_{Right}+Inc_{Right}+Cor_{Left}+Inc_{Left}}$'\r\n elif Params.BiasIndx==1:\r\n eq=r'$\\frac{Cor_{Right}}{Cor_{Right}+Inc_{Right}}-\\frac{Cor_{Left}}{Cor_{Left}+Inc_{Left}}$'\r\n \r\n bp = dict(boxstyle=\"round\", alpha=0.7, fc='w', ec=[.5,.5,.5])\r\n plt.text(-0.5, yl[1]*0.9, eq, ha='center', va='center', fontsize=14, bbox=bp)\r\n\r\n if Params.BiasIndx==2:\r\n dI=((nR[:,0]-nR[:,1])-(nL[:,0]-nL[:,1]))/(nR[:,0]+nR[:,1]+nL[:,0]+nL[:,1]+1e-3)\r\n elif Params.BiasIndx==1:\r\n dI=dR-dL\r\n \r\n b1 = plt.barh(x, dI, height=h, align='center',color=col[0],alpha=a)\r\n plt.plot([0,0],[-1,len(x)+2],'k',linewidth=2)\r\n p1, = plt.plot(np.zeros(len(x[Mondays])),x[Mondays],'o',color=[1,1,0])\r\n plt.xlim(-1,1)\r\n plt.ylim(yl)\r\n plt.yticks(x,'')\r\n plt.grid(alpha = gAlpha)\r\n\r\n # Add 'L' and 'R' arrows\r\n bp = dict(boxstyle=\"LArrow\", alpha=0.7, fc='w', ec=[.5,.5,.5])\r\n plt.text(-0.8, 0.5+yl[1]/2., 'L', ha='center', va='center', fontsize=15, bbox=bp)\r\n bp = dict(boxstyle=\"RArrow\", alpha=0.7, fc='w', ec=[.5,.5,.5])\r\n plt.text( 0.8, 0.5+yl[1]/2., 'R', ha='center', va='center', fontsize=15, bbox=bp)\r\n\r\n plt.tight_layout()\r\n\r\n #######################################################################################################\r\n ## Save the figure\r\n\r\n if Params.SaveFig:\r\n SaveFigure() # no handles tracked here; SaveFigure collects the open figures itself\r\n\r\n########################################################################################################################\r\n\r\ndef Sessions_NHits_NMiss_NError(Stims=[], horizontal=True, labels=[], axes=[],\r\n XYTicks=[True,True], XYLabels=[True,True], colors=[ [0,.2,.7], [0,.4,0], [.8,.1,.1] ]):\r\n\r\n '''Helper function to plot each session's numbers of corrects, incorrects and misses as a stacked bar plot\r\n INPUT:\r\n -Stims: [numpy array] containing one column for each number of hits, miss and errors,\r\n and rows for each session\r\n -horizontal: whether to plot vertical or horizontal bars.\r\n -labels: labels of each column\r\n -axes: provide an axes handle if you want to embed it in a figure\r\n -XYTicks: whether to draw the x and y tickmarks. Default: True\r\n -XYLabels: whether to draw the x and y labels. 
Default: True'''\r\n\r\n BarAlpha = 0.8\r\n BarHeight = 0.8\r\n GridAlpha = 0.6\r\n\r\n # TODO: nR, nL, Stim0, Stim1, yl, xl, Mondays, RatName and Task are still read\r\n # from the calling scope; pass them in explicitly before reusing this helper\r\n if not axes or not isinstance(axes, matplotlib.axes.Axes):\r\n ax=plt.subplot(111)\r\n rc('font', size=9, family='monospace', serif='Bitstream Vera Sans Mono')\r\n\r\n if horizontal:\r\n if Stim0:\r\n x = np.arange(1, len(Stim0)+1)\r\n b1 = plt.barh(x, nR[:,0], height=BarHeight, left=0, align='center',color=colors[0], alpha=BarAlpha)\r\n b2 = plt.barh(x, nR[:,1], height=BarHeight, left=nR[:,0], align='center',color=colors[2], alpha=BarAlpha)\r\n\r\n if Stim1:\r\n x = np.arange(1, len(Stim1)+1)\r\n b1 = plt.barh(x, -nL[:,0], height=BarHeight, left=0, align='center', color=colors[0], alpha=BarAlpha)\r\n b2 = plt.barh(x, -nL[:,1], height=BarHeight, left=-nL[:,0], align='center', color=colors[2], alpha=BarAlpha)\r\n\r\n plt.plot([0,0],[-1,yl[1]+2],'k',linewidth=2)\r\n p1, = plt.plot(np.zeros(len(x[Mondays])), x[Mondays], 'o', color=[1,1,0])\r\n\r\n # Add 'L' and 'R' arrows\r\n bp = dict(boxstyle=\"LArrow\", alpha=0.7, fc='w', ec=[.5,.5,.5])\r\n plt.text(-0.85*1.1*xl, 0.5+yl[1]/2., 'L', ha='center', va='center', fontsize=15, bbox=bp)\r\n\r\n bp = dict(boxstyle=\"RArrow\", alpha=0.7, fc='w', ec=[.5,.5,.5])\r\n plt.text( 0.85*1.1*xl, 0.5+yl[1]/2., 'R', ha='center', va='center', fontsize=15, bbox=bp)\r\n \r\n else:\r\n pass\r\n \r\n plt.legend([b1,b2,p1],['Corrects','Incorrects','Monday'],mode='expand',\r\n loc=8, ncol=3, fancybox=True, prop={'size':9})\r\n\r\n plt.title('Number of trials for '+RatName+' '+Task, fontdict={'fontsize':10})\r\n plt.xlabel('Number of trials')\r\n plt.yticks(x, '')\r\n plt.grid(alpha=GridAlpha)\r\n plt.tight_layout()\r\n xl = max(np.append(np.sum(nR,axis=1), np.sum(nL,axis=1)))\r\n\r\n plt.ylim(yl)\r\n plt.xlim(-1.1*xl,1.1*xl)\r\n\r\n########################################################################################################################\r\n \r\ndef PlotBeh(Data):\r\n import matplotlib.pyplot as p\r\n\r\n Stims = [k for k in Data.keys() if k.find('Stim')!=-1]\r\n\r\n rc('font',size=8)\r\n rc('font',family='serif')\r\n\r\n if bhv.GetMapping(Data)==1:\r\n\r\n p.figure(facecolor='w',dpi=100,figsize=(8,3.5))\r\n Stim = Data[Stims[0]] # assumed: a mapping-1 session exposes a single stimulus entry\r\n RTT = Stim['RTT']\r\n sRT = np.argsort(RTT)\r\n RT0 = Stim['RT0'][sRT]\r\n RT1 = Stim['RT1'][sRT]\r\n RT2 = Stim['RT2'][sRT]\r\n RT3 = Stim['RT3'][sRT]\r\n\r\n nTrials = len(Stim['RTT'])\r\n y = range(nTrials)\r\n\r\n p.plot(-RT0,y,'c.',markeredgecolor='c')\r\n p.plot(np.zeros(nTrials),y,'k.',markeredgecolor='k')\r\n p.plot(RT1,y,'b.',markeredgecolor='b')\r\n p.plot(RT1+RT2,y,'r.',markeredgecolor='r')\r\n p.plot(RT1+RT2+RT3,y,'g.',markeredgecolor='g')\r\n p.xlim(-1,np.max(RTT)+0.5)\r\n p.title('Reaction times')\r\n p.show()\r\n\r\n elif bhv.GetMapping(Data)==2:\r\n p.figure(facecolor='w',dpi=100,figsize=(8,3.5))\r\n j=1\r\n for k in Stims:\r\n\r\n p.subplot(2,2,j)\r\n\r\n if Data[k].has_key('RTTc') and Data[k].has_key('RT0c') and Data[k].has_key('RT1c') and Data[k].has_key('RT2c') and Data[k].has_key('RT3c'):\r\n RTT=Data[k]['RTTc']\r\n sRT=np.argsort(RTT)\r\n nTrials=len(Data[k]['RTTc'])\r\n y=range(nTrials)\r\n p.plot(np.zeros(nTrials),y,'k.',markeredgecolor='k')\r\n\r\n RT0 = Data[k]['RT0c'][sRT]\r\n RT1 = Data[k]['RT1c'][sRT]\r\n RT2 = Data[k]['RT2c'][sRT]\r\n RT3 = Data[k]['RT3c'][sRT]\r\n\r\n p.ylim(-1, RTT.size*1.1)\r\n p.plot(-RT0,y,'c.',markeredgecolor='c')\r\n p.plot(RT1,y,'b.',markeredgecolor='b')\r\n p.plot(RT1+RT2,y,'r.',markeredgecolor='r')\r\n p.plot(RT1+RT2+RT3,y,'g.',markeredgecolor='g')\r\n p.xlim(-1,np.max(RTT)+0.5)\r\n p.grid(True)\r\n p.title('Reaction 
times: Corrects '+str(Data[k]['Descr']))\r\n p.xlabel('Time (sec)',fontsize=10)\r\n j=j+1\r\n\r\n p.subplot(2,2,j)\r\n\r\n if Data[k].has_key('RTTi') and Data[k].has_key('RT0i') and Data[k].has_key('RT1i') and Data[k].has_key('RT2i') and Data[k].has_key('RT3i'):\r\n RTT=Data[k]['RTTi']\r\n sRT=np.argsort(RTT)\r\n nTrials=len(Data[k]['RTTi'])\r\n y=range(nTrials)\r\n p.plot(np.zeros(nTrials),y,'k.',markeredgecolor='k')\r\n\r\n RT0=Data[k]['RT0i'][sRT]\r\n RT1=Data[k]['RT1i'][sRT]\r\n RT2=Data[k]['RT2i'][sRT]\r\n RT3=Data[k]['RT3i'][sRT]\r\n\r\n p.ylim(-1, RTT.size*1.1)\r\n p.plot(-RT0,y,'c.',markeredgecolor='c')\r\n p.plot(RT1,y,'b.',markeredgecolor='b')\r\n p.plot(RT1+RT2,y,'r.',markeredgecolor='r')\r\n p.plot(RT1+RT2+RT3,y,'g.',markeredgecolor='g')\r\n p.xlim(-1,np.max(RTT)+0.5)\r\n p.grid(True)\r\n p.title('Reaction times: Incorrects '+str(Data[k]['Descr']))\r\n p.xlabel('Time (sec)',fontsize=10)\r\n j=j+1\r\n\r\n########################################################################################################################\r\n\r\ndef PlotRTStats(Stim):\r\n import matplotlib.pyplot as p\r\n\r\n rc('font',size=8)\r\n rc('font',family='serif')\r\n\r\n # m=2 when per-outcome keys such as 'RT1c'/'RT1i' exist; the for-else sets m=1 otherwise\r\n for x in Stim.keys():\r\n if re.search('RT[0-9][ci]', x):\r\n m=2\r\n break\r\n else:\r\n m=1\r\n\r\n if m==1:\r\n keys = ['RT1','RT2','RT3']\r\n elif m == 2:\r\n keys = ['RT1c','RT2c','RT3c']\r\n \r\n fig = p.figure(facecolor='w', dpi=100, figsize=(8,3.5))\r\n\r\n for j, k in enumerate(keys):\r\n ax = fig.add_subplot(3, 2, 2*(j+1)-1)\r\n ax.hist(Stim[k], 50, histtype='stepfilled')\r\n ax.set_xlim(-.5,1.5)\r\n ax.grid(True)\r\n \r\n ax = fig.add_subplot(3, 2, 2*(j+1))\r\n ax.boxplot(Stim[k],notch=1, sym='')\r\n ax.grid(True)\r\n \r\n fig.show()\r\n\r\n########################################################################################################################\r\n\r\ndef SaveFigure(FigHandles=[],FigName='Fig_',dpi=300, SaveDir='', Format='.jpg'):\r\n\r\n '''Helper function to save figures'''\r\n if not isinstance(FigHandles,list): FigHandles=[FigHandles]\r\n if not FigHandles or not all(isinstance(k, Figure) for k in FigHandles):\r\n import matplotlib._pylab_helpers\r\n FigHandles=[manager.canvas.figure for manager in matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]\r\n if FigHandles:\r\n class SetParams(dt.DataSet):\r\n '''Select the parameters to save the currently active figure'''\r\n Figures = di.MultipleChoiceItem(label='Select a Figure',\r\n choices=[str(k.number) for k in FigHandles])\r\n \r\n FigName = di.StringItem(label='FigName',default='Fig_').set_pos(col=0)\r\n \r\n Format = di.ChoiceItem(label='Format',\r\n choices=['.jpg','.png','.svg','.pdf']).set_pos(col=1)\r\n \r\n ImgDir = di.DirectoryItem(label='Directory to save').set_pos(col=0)\r\n \r\n dpi = di.IntItem(label='dpi',default=300,min=50,\r\n max=600,nonzero=True,slider=True).set_pos(col=0)\r\n \r\n Params = SetParams()\r\n \r\n if Params.edit() == 1 and len(Params.Figures)>0:\r\n FigHandles = [FigHandles[k] for k in Params.Figures]\r\n Format = ['.jpg','.png','.svg','.pdf'][Params.Format]\r\n SaveDir = Params.ImgDir\r\n dpi = Params.dpi\r\n \r\n if not Params.FigName:\r\n FigName='Fig_'\r\n else:\r\n FigName=Params.FigName\r\n \r\n else:\r\n return\r\n else:\r\n return\r\n\r\n for k in FigHandles:\r\n plt.figure(k.number)\r\n plt.savefig(os.path.join(SaveDir,FigName+'_'+str(k.number)+Format),\r\n dpi = dpi)\r\n\r\n########################################################################################################################\r\n\r\ndef 
Outliers4Skew(x):\r\n ''' Taken from:\r\n G. Brys; M. Hubert; A. Struyf (2004). A Robust Measure of Skewness.\r\n Journal of Computational and Graphical Statistics 13(4), 996-1017.'''\r\n\r\n x_med = np.median(x)\r\n xi = x[x<=x_med]\r\n xj = x[x>=x_med]\r\n\r\n h=[]\r\n\r\n for i in xi:\r\n for j in xj:\r\n if (j-i)==0:\r\n h.append(0)\r\n else:\r\n h.append(((j-x_med)-(x_med-i))/(j-i))\r\n\r\n MedCouple = np.median(h)\r\n p = np.percentile(x,[25,75])\r\n IQR = p[1]-p[0]\r\n Lower = p[0]-1.5*np.exp(-3.5*MedCouple)*IQR\r\n Upper = p[1]+1.5*np.exp( 4.0*MedCouple)*IQR\r\n\r\n return MedCouple, Lower, Upper\r\n\r\n########################################################################################################################\r\n\r\ndef bootci(Data, nSamples = 1000, Stat=['mean','median'][0], alpha = 5):\r\n '''Calculates a bootstrap confidence interval from nSamples\r\n resamples drawn with replacement from the data'''\r\n\r\n # imports\r\n from scipy.stats import scoreatpercentile, nanmean\r\n\r\n # convert Data into an array\r\n Data = np.array(Data)\r\n\r\n # get its length\r\n lSamp = Data.size\r\n \r\n # return a degenerate interval when there are 5 or fewer samples\r\n if lSamp <= 5:\r\n return np.array([Data.mean(), Data.mean()])\r\n\r\n # generate nSamples sets of random indices (sampling with replacement)\r\n index = np.array([np.random.randint(0,lSamp,lSamp) for y in range(nSamples)])\r\n\r\n # calculate the statistic\r\n if Stat=='mean':\r\n mSamp = nanmean(Data[index], axis=1)\r\n elif Stat=='median':\r\n mSamp = np.median(Data[index], axis=1)\r\n\r\n # calculate the confidence interval\r\n CI = np.array([scoreatpercentile(mSamp, alpha),\r\n scoreatpercentile(mSamp, 100-alpha)])\r\n return CI\r\n" } ]
2
cawthorna/Memory_Express_Web_Scraper
https://github.com/cawthorna/Memory_Express_Web_Scraper
83112bcf70fd4a333697722383d2a45289880462
778d3490117d6a9f5b0ec7505321d4f312349b11
819a5c4c6c6621c0f27021485257e28e527f7d6f
refs/heads/master
2020-03-14T13:53:54.394029
2018-06-07T02:16:05
2018-06-07T02:16:05
131,642,299
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6275911331176758, "alphanum_fraction": 0.6364069581031799, "avg_line_length": 30.02290153503418, "blob_id": "d2dd09183b6cfc2d3a892b319290e04675feafa7", "content_id": "0ff6baeeac901cf91661bf3eefb6a33922a940c5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4197, "license_type": "no_license", "max_line_length": 303, "num_lines": 131, "path": "/PythonApplication1/MemexEmail.py", "repo_name": "cawthorna/Memory_Express_Web_Scraper", "src_encoding": "UTF-8", "text": "from bs4 import BeautifulSoup\r\nfrom urllib2 import urlopen\r\nfrom smtplib import SMTP_SSL\r\nfrom email.mime.multipart import MIMEMultipart\r\nfrom email.mime.text import MIMEText\r\nimport locale\r\nimport datetime\r\nimport re\r\nimport os\r\n\r\n# Set Debug\r\ndebug = False\r\nSendToOthers = False\r\nsendEmail = True\r\n\r\n# From https://stackoverflow.com/questions/4060221/how-to-reliably-open-a-file-in-the-same-directory-as-a-python-script\r\n__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))\r\n\r\n# Send mail Method\r\ndef send_email(toEmail):\r\n\r\n # Put params together\r\n from_addr = 'Do Not Reply <no.reply.cawthornpi@gmail.com>'\r\n subject = \"MemEx Deal of the Day: \" + productName\r\n date = datetime.datetime.now().strftime(\"%d/%m/%Y %H:%M\")\r\n locale.setlocale(locale.LC_NUMERIC,'')\r\n message_text = productName + \" is on sale at Memory Express today for \" + salePrice + \".\\n\\nIt is \" + saleAmount[1:-1] + \" off of the regular price of \" + regularPrice + \" (\" + str(round((float(locale.atof(saleAmount[2:-1]))/float(locale.atof(regularPrice[1:-1])))*100,2)) + \"% off).\\n\\n\" + saleEnds\r\n\r\n # Create Message Container\r\n msg = MIMEMultipart('alternative')\r\n msg['Subject'] = subject\r\n msg['From'] = from_addr\r\n msg['To'] = toEmail\r\n\r\n\r\n # Create Message Body\r\n text = message_text\r\n\r\n html = \"<html><head></head><body>\"\r\n html += \"<p>\" + message_text + \"</p>\"\r\n html += \"<a href=\\\"\" + productURL + \"\\\">\" + productName + \"</a><br><br>\"\r\n html += '<div style=\"text-align: center;\">'\r\n html += str(picTag)\r\n html += \"</div></body></html>\"\r\n\r\n # Record MIME Types of both parts - text/plain and text/html.\r\n part1 = MIMEText(text, 'plain')\r\n part2 = MIMEText(html, 'html')\r\n\r\n # Attach parts into message container\r\n msg.attach(part1)\r\n msg.attach(part2)\r\n\r\n # Send message\r\n if(sendEmail):\r\n smtp.sendmail(from_addr,toEmail,msg.as_string())\r\n else:\r\n print msg\r\n\r\n\r\n# Scrape Information\r\nif(debug):\r\n print \"Scraping Information... 0%\"\r\nsoup = BeautifulSoup(urlopen(\"http://www.memoryexpress.com/\"), 'html.parser')\r\ndailyDealUrl = soup.find(\"div\", {\"class\": \"c-shhp-daily-deal\"}).find(\"a\")\r\npicTag = soup.body.find(\"div\", {\"class\": \"c-shhp-daily-deal__image\"}).find(\"img\")\r\n\r\nif(debug):\r\n print \"Scraping Information... 
5% \"\r\n\r\nsoup = BeautifulSoup(urlopen(\"https://www.memoryexpress.com\" + dailyDealUrl['href']), 'html.parser')\r\nproductName = soup.title.text.split(' at Memory Express - Memory Express Inc.')[0]\r\nproductURL = \"https://www.memoryexpress.com\" + dailyDealUrl['href']\r\n\r\npriceStructure = soup.find('div', attrs={'id':\"ProductPricing\"})\r\n\r\nsalePrice = priceStructure.find('div', attrs={'class':'GrandTotal'}).get_text()\r\nsalePrice = str(salePrice[salePrice.find(\"$\"):salePrice.find(\".\",salePrice.find(\"$\"))+3])\r\nsaleAmount = priceStructure.find('div', attrs={'class':'InstantSavings'}).get_text()\r\nregularPrice = priceStructure.find('div', attrs={'class':'RegularPrice'}).get_text()\r\nsaleEnds = priceStructure.find('div', attrs={'class':'EndDate'}).get_text()\r\n\r\nif(debug):\r\n print \"Scraping Information... 50% \" + productName + \" \" + saleEnds\r\n\r\nsaleAmount = saleAmount[1:].split('\\n')[1]\r\nregularPrice = regularPrice[1:].split('\\n')[0]\r\n\r\nif(debug):\r\n print \"Scraping Information... 100% \" + salePrice + \" \" + saleAmount + \" \" + regularPrice\r\n print \"Setting up SMTP\"\r\n\r\n# Email productName, salePrice, saleAmmount, regularPrice, saleEnds to text mailing list\r\n## Setup SMTP\r\nsmtp = SMTP_SSL()\r\nsmtp.set_debuglevel(0)\r\n\r\nif(debug):\r\n print \"Connecting...\"\r\n\r\nsmtp.connect('smtp.gmail.com')\r\n\r\n\r\nif(debug):\r\n print \"Logging in...\"\r\n\r\nf = open(os.path.join(__location__, 'email_credentials.txt'), 'r');\r\nusername = f.readline().strip('\\n\\r')\r\npassword = f.readline().strip('\\n\\r')\r\nf.close()\r\nif debug:\r\n print \"'\" + username + \"'\"\r\n print \"'\" + password + \"'\"\r\n\r\nsmtp.login(username,password)\r\n\r\nf = open(os.path.join(__location__, 'emails.txt'), 'r');\r\n## Send Emails\r\nfor email in f:\r\n send_email(email)\r\n if(debug):\r\n print \"Sending Email to: \" + email + \"\\n\"\r\n break\r\n\r\nf.close()\r\n\r\n## Close SMTP\r\nsmtp.close()\r\n\r\nif(debug):\r\n print \"SMTP closed.\"\r\n\r\n" } ]
1
RyanJKavanaugh/QATest101
https://github.com/RyanJKavanaugh/QATest101
73e4d009167233dfd77c58d4a0d9dddbd1f7deb3
e20c336d64b6f81d8dc40c4902e22d30031fa347
9500c32bb83c642b30100c3a919269a960926426
refs/heads/master
2021-07-21T16:37:42.456257
2017-10-30T17:04:46
2017-10-30T17:04:46
104,787,739
0
1
null
2017-09-25T18:38:25
2017-09-25T18:41:24
2017-10-12T17:55:25
Python
[ { "alpha_fraction": 0.6293785572052002, "alphanum_fraction": 0.6451977491378784, "avg_line_length": 27.516128540039062, "blob_id": "3e2a3cce0fbe266e5df7e7eb7943c0649ee7c0c4", "content_id": "266bd0f8c35c1de1acf98e595b66205cc00bf1c5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1770, "license_type": "no_license", "max_line_length": 90, "num_lines": 62, "path": "/VerifyHeaderLinks.py", "repo_name": "RyanJKavanaugh/QATest101", "src_encoding": "UTF-8", "text": "# coding=utf-8\nfrom selenium import webdriver\nfrom selenium import webdriver\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import Select\nfrom selenium.webdriver.common import action_chains, keys\nfrom selenium.webdriver.common.action_chains import ActionChains\nimport time\nimport unittest\nimport bs4\nimport urllib2\nfrom BeautifulSoup import BeautifulSoup\nimport requests\nfrom pyvirtualdisplay import Display\n# -*- coding: utf-8 -*-\n\ndisplay = Display(visible=0, size=(800, 800))\ndisplay.start()\n\n\nclass Verify_Idaho_Links(unittest.TestCase):\n\n\n def test_idaho_tg_web_topbar_links(self):\n strList = []\n httpLinkList = []\n\n url = 'http://hb.511.idaho.gov/'\n html_page = urllib2.urlopen(url)\n soup = BeautifulSoup(html_page)\n\n allPageLinks = soup.findAll('a', href=True)\n\n for link in allPageLinks:\n strList.append(str(link['href']))\n\n for realLink in strList:\n if realLink.startswith('http'):\n httpLinkList.append(realLink)\n\n counter = 0\n\n for item in httpLinkList:\n try:\n r = requests.head(item)\n #print r.status_code\n if r.status_code != 200 and r.status_code != 301 and r.status_code != 302:\n print item\n counter =+1\n except:\n print \"failed to connect\"\n counter =+ 1\n\n if counter > 0:\n assert False\n\nif __name__ == '__main__':\n unittest.main()\n\n\n" }, { "alpha_fraction": 0.6846693158149719, "alphanum_fraction": 0.6923602819442749, "avg_line_length": 50.324562072753906, "blob_id": "c29a0bbe71dd34cbcd08d0183aaf5a0e30d98b5c", "content_id": "340e27ed32bd482c5fb49565e2fedaaae8155fde", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5859, "license_type": "no_license", "max_line_length": 150, "num_lines": 114, "path": "/VerifyMapLayers.py", "repo_name": "RyanJKavanaugh/QATest101", "src_encoding": "UTF-8", "text": "# coding=utf-8\nfrom selenium import webdriver\nfrom selenium import webdriver\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import Select\nfrom selenium.webdriver.common import action_chains, keys\nfrom selenium.webdriver.common.action_chains import ActionChains\nimport time\nimport unittest\nfrom pprint import pprint\nfrom pyvirtualdisplay import Display\n# -*- coding: utf-8 -*-\n\n\n\ndisplay = Display(visible=0, size=(800, 800))\ndisplay.start()\n\n\nclass Verify_Idaho_Layers(unittest.TestCase):\n\n def setUp(self):\n self.driver = webdriver.Chrome()\n 
self.driver.get('http://hb.511.idaho.gov/#roadReports?timeFrame=TODAY&layers=roadReports%2CwinterDriving%2CweatherWarnings%2CotherStates')\n\n\n def test_presence_of_correct_layers(self):\n\n driver = self.driver\n driver.maximize_window()\n\n dropDownMenuWait = WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.ID, 'layers-menu-dropdown-button')))\n driver.find_element_by_id('layers-menu-dropdown-button').click()\n\n #=================== Road Reports Verification\n # Idaho: CHECKED √\n menuItemsWait = WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.XPATH, '//*[@id=\"layerSelector\"]/ul/li[1]/a/span/img[1]')))\n roadReports = driver.find_element_by_xpath('//*[@id=\"layerSelector\"]/ul/li[1]')\n roadReportsOuterHTMLData = driver.execute_script(\"return arguments[0].outerHTML;\", roadReports)\n self.assertIn('images/checkbox-checked.png', roadReportsOuterHTMLData)\n\n #=================== Winter Driving Verification\n # Idaho: CHECKED √\n winterDrivingReports = driver.find_element_by_xpath('//*[@id=\"layerSelector\"]/ul/li[2]')\n winterDrivingOuterHTMLData = driver.execute_script(\"return arguments[0].outerHTML;\", winterDrivingReports)\n self.assertIn('images/checkbox-checked.png', winterDrivingOuterHTMLData)\n\n #=================== Weather Warnings Verification\n # Idaho: CHECKED √\n weatherWarningsReports = driver.find_element_by_xpath('//*[@id=\"layerSelector\"]/ul/li[3]')\n weatherWarningsReportsOuterHTMLData = driver.execute_script(\"return arguments[0].outerHTML;\", weatherWarningsReports)\n self.assertIn('images/checkbox-checked.png', weatherWarningsReportsOuterHTMLData)\n\n #=================== Traffic Speeds Verification\n # Idaho: UNCHECKED\n trafficSpeedsReports = driver.find_element_by_xpath('//*[@id=\"layerSelector\"]/ul/li[4]')\n trafficSpeedsReportsOuterHTMLData = driver.execute_script(\"return arguments[0].outerHTML;\", trafficSpeedsReports)\n self.assertIn('images/checkbox-unchecked.png', trafficSpeedsReportsOuterHTMLData)\n\n # =================== Electronic Signs Verification\n # Idaho: UNCHECKED\n eSignsReports = driver.find_element_by_xpath('//*[@id=\"layerSelector\"]/ul/li[5]')\n eSignsReportsOuterHTMLData = driver.execute_script(\"return arguments[0].outerHTML;\", eSignsReports)\n self.assertIn('images/checkbox-unchecked.png', eSignsReportsOuterHTMLData)\n\n # =================== Mountain Passes Verification\n # Idaho: UNCHECKED\n mountainPassesReports = driver.find_element_by_xpath('//*[@id=\"layerSelector\"]/ul/li[6]')\n mountainPassesReportsOuterHTMLData = driver.execute_script(\"return arguments[0].outerHTML;\", mountainPassesReports)\n self.assertIn('images/checkbox-unchecked.png', mountainPassesReportsOuterHTMLData)\n\n # =================== Camera Verification\n # Idaho: UNCHECKED\n cameraReports = driver.find_element_by_xpath('//*[@id=\"layerSelector\"]/ul/li[7]')\n cameraReportsOuterHTMLData = driver.execute_script(\"return arguments[0].outerHTML;\", cameraReports)\n self.assertIn('images/checkbox-unchecked.png', cameraReportsOuterHTMLData)\n\n # =================== Weather Stations Verification\n # Idaho: UNCHECKED\n weatherStationsReports = driver.find_element_by_xpath('//*[@id=\"layerSelector\"]/ul/li[8]')\n weatherStationsReportsOuterHTMLData = driver.execute_script(\"return arguments[0].outerHTML;\", weatherStationsReports)\n self.assertIn('images/checkbox-unchecked.png', weatherStationsReportsOuterHTMLData)\n\n # =================== Rest Areas Verification\n # Idaho: UNCHECKED\n restAreasReports = 
driver.find_element_by_xpath('//*[@id=\"layerSelector\"]/ul/li[9]')\n restAreasReportsOuterHTMLData = driver.execute_script(\"return arguments[0].outerHTML;\", restAreasReports)\n self.assertIn('images/checkbox-unchecked.png', restAreasReportsOuterHTMLData)\n\n # =================== Other States' Info Verification\n # Idaho: CHECKED √\n otherStatesReports = driver.find_element_by_xpath('//*[@id=\"layerSelector\"]/ul/li[10]')\n otherStatesReportsOuterHTMLData = driver.execute_script(\"return arguments[0].outerHTML;\", otherStatesReports)\n self.assertIn('images/checkbox-checked.png', otherStatesReportsOuterHTMLData)\n\n # =================== Transit Routes Verification\n # Idaho: UNCHECKED\n transitRoutesReports = driver.find_element_by_xpath('//*[@id=\"layerSelector\"]/ul/li[11]')\n transitRoutesReportsOuterHTMLData = driver.execute_script(\"return arguments[0].outerHTML;\", transitRoutesReports)\n self.assertIn('images/checkbox-unchecked.png', transitRoutesReportsOuterHTMLData)\n\n\n def tearDown(self):\n print \"Test Completed\"\n self.driver.quit()\n\n\nif __name__ == '__main__':\n print ('\\n') + \"Verifying Idaho TG Web Default Map Layers\" + '\\n'\n unittest.main()\n" }, { "alpha_fraction": 0.6935064792633057, "alphanum_fraction": 0.7054545283317566, "avg_line_length": 36.019229888916016, "blob_id": "d04522fc5e007569f1dac983c6940b99f3ff77f0", "content_id": "c3b12d62b93824625bfe82670739bc5e4ee6105b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1929, "license_type": "no_license", "max_line_length": 135, "num_lines": 52, "path": "/VerifyUserLogin.py", "repo_name": "RyanJKavanaugh/QATest101", "src_encoding": "UTF-8", "text": "# coding=utf-8\nfrom selenium import webdriver\nfrom selenium import webdriver\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import Select\nfrom selenium.webdriver.common import action_chains, keys\nfrom selenium.webdriver.common.action_chains import ActionChains\nimport time\nimport unittest\nfrom pyvirtualdisplay import Display\n# -*- coding: utf-8 -*-\n\n\n\ndisplay = Display(visible=0, size=(800, 800))\ndisplay.start()\n\n\nclass Verify_Login_And_Saving_Routes(unittest.TestCase):\n def setUp(self):\n self.driver = webdriver.Chrome()\n print '\\n' + \"Test for Idaho: Verifying login feature\" + '\\n'\n self.driver.get(\"http://hb.511.idaho.gov/\")\n\n\n def test_login_route_creation_and_deletion(self):\n driver = self.driver\n driver.maximize_window()\n\n loginElement = WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.ID, 'sign-in-link')))\n driver.find_element_by_id('sign-in-link').click()\n loginElement2 = WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.ID, 'userAccountEmail')))\n driver.find_element_by_id('userAccountEmail').send_keys('ryan.kavanaugh@crc-corp.com')\n driver.find_element_by_id('userAccountPassword').send_keys('test')\n driver.find_element_by_id('userAccountPassword').submit()\n time.sleep(4)\n\n left_Panel_Wait = WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.XPATH, '//*[@title=\"Ryan’s Favorites\"]')))\n assert driver.find_element_by_xpath(\"//*[contains(text(), 'Ryan’s 511')]\")\n\n\n def tearDown(self):\n print '\\n' + \"Test Completed\"\n self.driver.quit()\n\n\nif 
__name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.6748828887939453, "alphanum_fraction": 0.6830311417579651, "avg_line_length": 50.13541793823242, "blob_id": "fb15c1994260f8ab470e1209972e5aced5e5a5c4", "content_id": "724dbe97590520c8684b24fc3a40546056c1911e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4919, "license_type": "no_license", "max_line_length": 170, "num_lines": 96, "path": "/VerifyMenuOptions.py", "repo_name": "RyanJKavanaugh/QATest101", "src_encoding": "UTF-8", "text": "# coding=utf-8\nfrom selenium import webdriver\nfrom selenium import webdriver\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import Select\nfrom selenium.webdriver.common import action_chains, keys\nimport time\nimport unittest\nfrom pyvirtualdisplay import Display\n# -*- coding: utf-8 -*-\n\n\n\ndisplay = Display(visible=0, size=(800, 800))\ndisplay.start()\n\nclass Verify_Idaho_Menu_Options(unittest.TestCase):\n\n def setUp(self):\n self.driver = webdriver.Chrome()\n # self.driver.get('http://idwebtg.carsstage.org/#roadReports?timeFrame=TODAY&layers=roadReports%2CwinterDriving%2CweatherWarnings%2CotherStates')\n self.driver.get('http://hb.511.idaho.gov/#roadReports?timeFrame=TODAY&layers=roadReports%2CwinterDriving%2CweatherWarnings%2CotherStates')\n print ('\\n') + \"Test Verifying Idaho TG Web Lefthand Side Menu Options\"\n\n\n def test_idaho_menu(self):\n\n driver = self.driver\n\n # Login To The System\n element = WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.ID, 'sign-in-link')))\n driver.find_element_by_id('sign-in-link').click()\n driver.find_element_by_id('userAccountEmail').send_keys('ryan.kavanaugh@crc-corp.com')\n driver.find_element_by_id('userAccountPassword').send_keys('test')\n driver.find_element_by_id('userAccountPassword').submit()\n\n # Check that the menu items are all present\n left_Panel_Wait = WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.XPATH, '//*[@title=\"Ryan’s Favorites\"]')))\n # Personalize your 511\n assert (driver.find_element_by_xpath('//*[@title=\"Ryan’s Favorites\"]').is_displayed()) == True, \"Favorites not present\"\n # All Reports\n assert driver.find_element_by_xpath('//*[@title=\"See all traffic reports and winter driving conditions\"]').is_displayed(), \"All Reports not present\"\n # Google Traffic\n assert driver.find_element_by_xpath('//*[@title=\"See up-to-date traffic conditions\"]').is_displayed(), \"Google Traffic not present\"\n # Cameras\n assert driver.find_element_by_xpath('//*[@title=\"See maps and lists of cameras and view camera images\"]').is_displayed(), \"Cameras not present\"\n # Weather Stations\n assert driver.find_element_by_xpath('//*[@title=\"See maps and lists of weather stations and review weather data\"]').is_displayed(), \"Weather Stations not present\"\n # Electronic Signs\n assert driver.find_element_by_xpath('//*[@title=\"See maps and lists of signs\"]').is_displayed(), \"Electronic Signs not present\"\n # Transit Routes\n assert driver.find_element_by_xpath('//*[@title=\"See maps and lists of bus routes and positions\"]').is_displayed(), \"Transit Routes not present\"\n # Mountain Passes\n assert driver.find_element_by_xpath('//*[@title=\"See maps and lists 
of mountain passes\"]').is_displayed(), \"Mountain Passes not present\"\n # Twitter\n assert driver.find_element_by_xpath('//*[@title=\"Follow area Twitter feeds to get traffic alerts\"]').is_displayed(), \"Twitter not present\"\n\n\n # Check that the menu options that open a new panel are functioning properly\n # Favorites\n driver.find_element_by_xpath('//*[@title=\"Ryan’s Favorites\"]').click()\n assert driver.find_element_by_id('favorites-content-area').is_displayed()\n time.sleep(1)\n home_Button_Wait = WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.ID, 'homeBtn')))\n home_Button_Wait.click()\n\n # Wait for main menu to load\n left_Panel_Wait2 = WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.XPATH, '//*[@title=\"Ryan’s Favorites\"]')))\n\n # Transit Routes\n driver.find_element_by_xpath('//*[@title=\"See maps and lists of bus routes and positions\"]').click()\n assert driver.find_element_by_id('address0').is_displayed()\n time.sleep(1)\n home_Button_Wait.click()\n\n # Wait for main menu to load\n left_Panel_Wait3 = WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.XPATH, '//*[@title=\"Ryan’s Favorites\"]')))\n #time.sleep(3)\n # Twitter\n driver.find_element_by_xpath('//*[@title=\"Follow area Twitter feeds to get traffic alerts\"]').click()\n time.sleep(1)\n left_Panel_WaitTwitter = WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.ID, 'feedGroupsContainer')))\n assert ('Traffic Alerts via Twitter') in driver.page_source, \"The twitter panel is not displayed\"\n\n\n def tearDown(self):\n print '\\n' + \"Test Completed\"\n self.driver.quit()\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.5976627469062805, "alphanum_fraction": 0.6066170930862427, "avg_line_length": 38.220237731933594, "blob_id": "181fefd7274ccdd141ccd46312cb7f4a417651af", "content_id": "82d9ff85ab72bf043cb85f51f6a51b559d2436d0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6589, "license_type": "no_license", "max_line_length": 182, "num_lines": 168, "path": "/VerifyMapIcons.py", "repo_name": "RyanJKavanaugh/QATest101", "src_encoding": "UTF-8", "text": "from selenium import webdriver\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import Select\nfrom selenium.webdriver.common import action_chains, keys\nfrom selenium.webdriver.common.action_chains import ActionChains\nimport time\nimport unittest\nfrom pprint import pprint\nfrom bs4 import BeautifulSoup\nimport json\nimport jsonpickle\nfrom pyvirtualdisplay import Display\n\n\ndisplay = Display(visible=0, size=(800, 800))\ndisplay.start()\n\n\n\nclass Verify_Idaho_Map_Icons(unittest.TestCase):\n\n def setUp(self):\n self.driver = webdriver.Chrome()\n self.driver.maximize_window()\n\n def test_road_reports(self):\n print '\\n' + \"Verifying: Idaho Map Icons -> Road Reports\"\n driver = self.driver\n driver.get('http://idtg.carsprogram.org/events_v1/api/eventMapFeatures?eventClassifications=roadReports')\n tgWebDict = {}\n\n data = driver.find_element_by_tag_name('body').text\n jsonData = json.loads(data)\n\n for item in jsonData:\n IDNum = item.get('id')\n imageName = item.get('representation').get('iconProperties').get('image')\n tgWebDict[IDNum] = 
imageName\n\n for roadReportsNum in tgWebDict:\n testURL = 'http://hb.511.idaho.gov/#roadReports/eventAlbum/' + str(roadReportsNum) + '?timeFrame=TODAY&layers=roadReports%2CwinterDriving%2CweatherWarnings%2CotherStates'\n driver.get(testURL)\n\n try:\n mainImageWait = WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.ID, 'galleryPopup')))\n assert driver.find_element_by_id('galleryPopup') #.is_displayed()\n except:\n print roadReportsNum\n assert False\n\n\n def test_cameras(self):\n print '\\n' + \"Verifying: Idaho Map Icons -> Cameras\"\n driver = self.driver\n driver.get('http://idtg.carsprogram.org:80/cameras_v1/api/cameras?publicOnly=true')\n tgWebList = {}\n # 1. Grab all of the JSON from the API\n data = driver.find_element_by_tag_name('body').text\n jsonData = json.loads(data)\n # 2. Parse the Json into the dictionary\n for item in jsonData:\n IDNum = item.get('id')\n cameraName = item.get('name')\n tgWebList[IDNum] = cameraName\n # 3. Run through the dictionary to populate the web browser\n for cameraNum in tgWebList:\n testURL = 'http://hb.511.idaho.gov/#cameras/albumView/' + str(cameraNum) + '?timeFrame=TODAY&layers=cameras'\n driver.get(testURL)\n # 4. Assert the web browser is correct by verifying the ablum view\n try:\n albumViewWait = WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.ID, 'galleryPopup')))\n assert driver.find_element_by_id('galleryPopup') # .is_displayed()\n except:\n print cameraNum\n assert False\n\n def test_signs(self):\n print '\\n' + \"Verifying: Idaho Map Icons -> Signs\"\n driver = self.driver\n driver.get('http://idtg.carsprogram.org/signs_v1/api/signs')\n tgWebList = {}\n\n data = driver.find_element_by_tag_name('body').text\n jsonData = json.loads(data)\n\n for item in jsonData:\n IDNum = item.get('idForDisplay')\n locationName = item.get('name')\n tgWebList[IDNum] = locationName\n\n for signNum in tgWebList:\n testURL = 'http://hb.511.idaho.gov/#signs/albumView/idahosigns*' + str(signNum) + '?timeFrame=TODAY&layers=signs'\n driver.get(testURL)\n try:\n albumViewWait = WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.ID, 'galleryPopup')))\n assert driver.find_element_by_id('galleryPopup') # .is_displayed()\n except:\n print signNum\n assert False\n\n\n # def test_a_mountain_passes(self): ?????\n # print '\\n' + \"Verifying: Idaho Map Icons -> Mountain Passes\"\n # driver = self.driver\n # driver.get('http://idtg.carsprogram.org:80/mountainpasses_v1/api/passes')\n # tgWebList = {}\n #\n # data = driver.find_element_by_tag_name('body').text\n # jsonData = json.loads(data)\n #\n #\n # for item in jsonData:\n # IDNum = item.get('id')\n # locationName = item.get('name')\n # tgWebList[IDNum] = locationName\n #\n # for passesNum in tgWebList:\n # if passesNum != 1:\n # testURL = 'http://hb.511.idaho.gov/#mountainPasses/albumView/' + str(passesNum) + '?timeFrame=TODAY&layers=mountainPasses'\n # driver.get(testURL)\n # print testURL\n # try:\n # albumViewWait = WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.ID, 'galleryPopup')))\n # assert driver.find_element_by_id('galleryPopup') # .is_displayed()\n # except:\n # driver.save_screenshot('testing123.png')\n # print passesNum\n # assert False\n # http://idtg.carsprogram.org/mountainpasses_v1/\n\n\n def test_weather_stations(self):\n print '\\n' + \"Verifying: Idaho Map Icons -> Weather Stations\"\n driver = self.driver\n driver.get('http://idtg.carsprogram.org:80/rwis_v1/api/stations')\n tgWebList = {}\n\n data = 
driver.find_element_by_tag_name('body').text\n jsonData = json.loads(data)\n\n for item in jsonData:\n IDNum = item.get('id')\n locationName = item.get('name')\n tgWebList[IDNum] = locationName\n\n for stationsNum in tgWebList:\n testURL = 'http://hb.511.idaho.gov/#rwis/albumView/' + str(stationsNum) + '?timeFrame=TODAY&layers=rwis'\n driver.get(testURL)\n\n try:\n albumViewWait = WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.ID, 'galleryPopup')))\n assert driver.find_element_by_id('galleryPopup') # .is_displayed()\n except:\n driver.save_screenshot('testing123.png')\n print stationsNum\n assert False\n\n def tearDown(self):\n print \"Test Completed\"\n self.driver.quit()\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.6421499252319336, "alphanum_fraction": 0.6541725397109985, "avg_line_length": 39.644859313964844, "blob_id": "9d61597176fb1cf5bede8f82475f569a9af37a68", "content_id": "09d45b5cc3c2557ade1a196ab4c0d86a833790c0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4242, "license_type": "no_license", "max_line_length": 172, "num_lines": 107, "path": "/VerifyCreateAndDeleteRoute.py", "repo_name": "RyanJKavanaugh/QATest101", "src_encoding": "UTF-8", "text": "from selenium import webdriver\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import Select\nfrom selenium.webdriver.common import action_chains, keys\nfrom selenium.webdriver.common.by import By\nimport time\nimport unittest\nfrom pyvirtualdisplay import Display\n# -*- coding: utf-8 -*-\n\n\ndisplay = Display(visible=0, size=(800, 800))\ndisplay.start()\n\nclass Verify_Login_And_Saving_Routes(unittest.TestCase):\n\n def setUp(self):\n self.driver = webdriver.Chrome()\n print '\\n' + \"Test for ID: Verifying login and saving routes features\" + '\\n'\n\n\n def test_login_route_creation_and_deletion(self):\n\n driver = self.driver\n driver.maximize_window()\n\n # HEAD TO THE IDAHO 511 WEBSITE\n driver.get(\"http://hb.511.idaho.gov/#roadReports?timeFrame=TODAY&layers=roadReports%2CwinterDriving%2CweatherWarnings%2CotherStates\")\n\n # SELECT THE FAVORITE PAGE\n time.sleep(4)\n signInButton = driver.find_element_by_id('favoriteBtn')\n signInButton.click()\n\n # LOGIN INFO/LOGIN BUTTON\n time.sleep(2)\n driver.find_element_by_id('userAccountEmail').send_keys('ryan.kavanaugh@crc-corp.com') # Login\n driver.find_element_by_id('userAccountPassword').send_keys('test')\n driver.find_element_by_id('userAccountPassword').submit()\n\n # HEAD TO THE SEARCH PAGE\n time.sleep(2)\n driver.find_element_by_id('searchBtn').click()\n\n # ENTER LOCATIONS A & B\n time.sleep(2)\n driver.find_element_by_id('address0').send_keys('Idaho Falls, ID')\n time.sleep(2)\n driver.find_element_by_id('address0').send_keys(Keys.RETURN)\n driver.find_element_by_id('address1').send_keys('Sun Valley, ID')\n time.sleep(2)\n driver.find_element_by_id('address1').send_keys(Keys.RETURN)\n time.sleep(2)\n driver.find_element_by_id('pickARouteSearchBtn').click()\n\n # SAVE THE LINK\n time.sleep(2)\n driver.find_element_by_xpath('//*[@id=\"leftPanelContent\"]/div/div[3]/a').click() # Clicking the save this link\n\n # CLICK SUBMIT\n time.sleep(2)\n driver.find_element_by_xpath('//*[@id=\"save-route-form\"]/button').submit() # Clicking 
the submit button\n\n        # ASSERT THE SAVE FUNCTION WORKED AND WE ARE NOW ON THE 'FAVORITES' PAGE\n        pageLoadWait = WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.ID, \"favorites-content-area\")))\n        assert (driver.find_element_by_id(\"favorites-content-area\").is_displayed()), 'Favorites content area is not displayed' # Did we make it to the 'Favorites' page\n\n\n#        driver.save_screenshot('FavoritesPageScreenShot.png')\n\n        routeHamburgerMenuWait = WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.XPATH, '//*[@title=\"Customize and control Your 511\"]')))\n\n        driver.find_element_by_xpath('//*[@title=\"Customize and control Your 511\"]').click()\n        driver.find_element_by_xpath(\"//*[contains(text(), 'Delete this route')]\").click()\n        driver.switch_to.alert.accept()\n\n        # Keep deleting saved routes until none remain; the wait inside the loop\n        # times out once the route menu is gone, which breaks out of the loop.\n        while True:\n            try:\n                menuWait = WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.XPATH, '//*[@title=\"Customize and control Your 511\"]')))\n                driver.find_element_by_xpath('//*[@title=\"Customize and control Your 511\"]').click()\n\n                deleteWait = WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.XPATH, \"//*[contains(text(), 'Delete this route')]\")))\n                driver.find_element_by_xpath(\"//*[contains(text(), 'Delete this route')]\").click()\n                driver.switch_to.alert.accept()\n            except:\n                break\n\n        # With every route deleted, the route menu should no longer be present\n        try:\n            driver.find_element_by_xpath('//*[@title=\"Customize and control Your 511\"]').is_displayed()\n            assert False\n        except:\n            assert True\n\n\n    def tearDown(self):\n        print '\\n' + \"Test Completed\"\n        self.driver.quit()\n\n\nif __name__ == '__main__':\n    unittest.main()\n" } ]
6
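The three active tests in this repo repeat one pattern: pull JSON from a traveler-information API, build an id-to-name map, open the album view for each id, and wait for the `galleryPopup` element. A minimal sketch of that pattern factored into a single helper follows; the function name `verify_album_views` and its parameters are illustrative, not part of the repo.

```python
import json

from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC


def verify_album_views(driver, api_url, detail_url_template, id_key='id'):
    """Open the album view for every item the API lists and check it renders."""
    driver.get(api_url)
    items = json.loads(driver.find_element_by_tag_name('body').text)
    failures = []
    for item in items:
        # Visit the detail page for this item and wait for the gallery popup
        driver.get(detail_url_template.format(item.get(id_key)))
        try:
            WebDriverWait(driver, 20).until(
                EC.presence_of_element_located((By.ID, 'galleryPopup')))
        except Exception:
            failures.append(item.get(id_key))
    assert not failures, 'Album view missing for ids: %s' % failures
```

With such a helper, the cameras test would reduce to one call with the API URL and 'http://hb.511.idaho.gov/#cameras/albumView/{}?timeFrame=TODAY&layers=cameras' as the template, and failing ids would all be reported at once instead of aborting on the first one.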
thaiat/node-python-flask
https://github.com/thaiat/node-python-flask
cdd06b4b3065f8e91ab730a114254d0958090061
9351099e4929fd946cf0f45b2217ca3d83b4603f
e60c727856e855f187805d32439799273507b7c8
refs/heads/master
2021-01-19T02:51:35.744160
2016-06-06T05:43:55
2016-06-06T05:43:55
46,794,526
1
0
null
2015-11-24T13:48:54
2015-11-24T13:49:27
2015-11-24T17:46:56
Python
[ { "alpha_fraction": 0.6587872505187988, "alphanum_fraction": 0.6731757521629333, "avg_line_length": 24.605262756347656, "blob_id": "50fe44f5a4bca59d7d834a65d17124a1aaf849a2", "content_id": "619e1cc0208d0830da29645f52887ba13edeea95", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 973, "license_type": "no_license", "max_line_length": 71, "num_lines": 38, "path": "/app.py", "repo_name": "thaiat/node-python-flask", "src_encoding": "UTF-8", "text": "import os\nfrom flask import Flask, request, jsonify\nimport logging\nimport video as video\nimport datetime\n\n\napp = Flask(__name__)\n\nstream_handler = logging.StreamHandler()\napp.logger.addHandler(stream_handler)\napp.logger.setLevel(logging.INFO) # set the desired logging level here\napp.logger.info('app started')\n\n\n@app.route('/')\ndef index():\n return \"Welcome Heroku Flask!\"\n\n\n@app.route('/api/videos/process', methods=['POST'])\ndef videos_process():\n start = datetime.datetime.now()\n jsonBody = request.get_json(silent=True)\n content = jsonBody.get('content')\n\n caras = video.process(content, app)\n\n app.logger.info('[INFO] Detection: {0}ms, {1}'.format(\n (datetime.datetime.now() - start).total_seconds()*1000, caras))\n\n # return json.dumps(result)\n retval = jsonify({'results': {'caras': caras}})\n return retval\n\nif __name__ == '__main__':\n port = int(os.environ.get('PORT', 5000))\n app.run(host='0.0.0.0', port=port, debug=True)\n" }, { "alpha_fraction": 0.5776545405387878, "alphanum_fraction": 0.6093502640724182, "avg_line_length": 33.10810852050781, "blob_id": "d11d232cb8dd74719dde3256d46fa90e5424a6b9", "content_id": "1a8dafa326cac801bc824b448c553c672bea934c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2524, "license_type": "no_license", "max_line_length": 79, "num_lines": 74, "path": "/video3.py", "repo_name": "thaiat/node-python-flask", "src_encoding": "UTF-8", "text": "import re\nimport cStringIO\nfrom PIL import Image\nimport numpy as np\nimport cv2\nfrom imutils.object_detection import non_max_suppression\n\nIMAGE_SIZE = 80.0\nIMAGE_PADDING = 5\nMATCH_THRESHOLD = 15\norb = cv2.ORB(1000, 1.2)\nbf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)\n#cascade = cv2.CascadeClassifier('classifier/fifa.xml')\n#reference = cv2.imread('images/fifaref2.jpg')\ncascade = cv2.CascadeClassifier('classifier/swbattlefront_small.xml')\nreference = cv2.imread('images/swbattlefront.jpg')\n\nreference = cv2.cvtColor(reference, cv2.COLOR_RGB2GRAY)\nratio = IMAGE_SIZE/reference.shape[1]\nreference = cv2.resize(\n reference, (int(IMAGE_SIZE), int(reference.shape[0]*ratio)))\nkp_r, des_r = orb.detectAndCompute(reference, None)\n\n\ndef process(content, app):\n if not isinstance(content, unicode):\n return []\n image_data = re.sub('^data:image/.+;base64,', '', content).decode('base64')\n image = Image.open(cStringIO.StringIO(image_data))\n image = cv2.cvtColor(np.array(image), 2)\n # cv2.imwrite('image.jpg', image)\n\n gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n\n faces = cascade.detectMultiScale(image, 1.03, 10, minSize=(15, 15))\n\n if(len(faces) <= 0):\n return []\n app.logger.info('faces {0}'.format(len(faces)))\n\n rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in faces])\n pick = non_max_suppression(rects, probs=None, overlapThresh=0.15)\n app.logger.info('pick {0}'.format(len(pick)))\n\n good = []\n for (x, y, x2, y2) in pick:\n\n obj = gray[(y-IMAGE_PADDING):(y2+IMAGE_PADDING),\n 
(x-IMAGE_PADDING):(x2+IMAGE_PADDING)]\n if obj.shape[0] == 0 or obj.shape[1] == 0:\n continue\n ratio = IMAGE_SIZE/obj.shape[1]\n obj = cv2.resize(obj, (int(IMAGE_SIZE), int(obj.shape[0]*ratio)))\n # find the keypoints and descriptors for object\n kp_o, des_o = orb.detectAndCompute(obj, None)\n if len(kp_o) == 0:\n continue\n\n # match descriptors\n matches = bf.match(des_r, des_o)\n app.logger.info('matches {0}'.format(len(matches)))\n if(len(matches) >= MATCH_THRESHOLD):\n good.append(\n {'x': x*1, 'y': y*1, 'width': (x2-x)*1, 'height': (y2-y)*1})\n\n # for f in good:\n # cv2.rectangle(\n # image,\n # (f.get('x'), f.get('y')),\n # (f.get('width'), f.get('height')),\n # (0, 255, 0), 6)\n # cv2.imwrite('image.jpg', image)\n app.logger.info('good {0}'.format(len(good)))\n return good\n" }, { "alpha_fraction": 0.6934460997581482, "alphanum_fraction": 0.7167019248008728, "avg_line_length": 21.571428298950195, "blob_id": "cd0e701515fc3c51243d22452f4ebc837cee67a7", "content_id": "cd88e24ebd96d740c9481ccd7088f3be18814bb1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 473, "license_type": "no_license", "max_line_length": 110, "num_lines": 21, "path": "/README.md", "repo_name": "thaiat/node-python-flask", "src_encoding": "UTF-8", "text": "## Destruction\n```bash\nheroku destroy node-python-flask --confirm node-python-flask\n```\n\n## Installation\n### WORKING WITH PYTHON 2.7.10 and OPENCV 2.4.11\n```bash\nheroku create node-python-flask --buildpack https://github.com/heroku/heroku-buildpack-nodejs.git --region eu\nheroku buildpacks:add --index 2 https://github.com/diogojc/heroku-buildpack-python-opencv-scipy.git#cedar14\n```\n\n\n## Procfile\n```\nweb: gunicorn --log-file=- app:app\n```\nor\n```\nweb: python app.py\n```" }, { "alpha_fraction": 0.45812806487083435, "alphanum_fraction": 0.6995074152946472, "avg_line_length": 14.692307472229004, "blob_id": "5861be9f1ea48b0c446bbc13bca342feb80ac4c2", "content_id": "ce9508ee7b07aa222490504ef733cd24b4f62689", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 203, "license_type": "no_license", "max_line_length": 21, "num_lines": 13, "path": "/requirements.txt", "repo_name": "thaiat/node-python-flask", "src_encoding": "UTF-8", "text": "Flask==0.10.1\nFlask-RESTful==0.2.12\nJinja2==2.7.3\nMarkupSafe==0.23\nWerkzeug==0.9.6\naniso8601==0.82\ngunicorn==19.0.0\nitsdangerous==0.24\npytz==2014.4\nsix==1.7.2\nPillow==3.0.0\nrequests==2.8.1\nimutils==0.3.1" } ]
4
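The core decision in this repo's `video3.py` is a keypoint-matching gate: a detected region counts as a hit only if it yields at least `MATCH_THRESHOLD` ORB descriptor matches against the reference image. A self-contained sketch of just that gate follows, written against the modern `cv2.ORB_create` API rather than the `cv2.ORB` constructor the repo's OpenCV 2.4 uses; the function name is illustrative.

```python
import cv2

MATCH_THRESHOLD = 15  # minimum descriptor matches to accept a candidate


def matches_reference(reference_gray, candidate_gray):
    """Return True if the candidate image matches the reference closely enough."""
    orb = cv2.ORB_create(nfeatures=1000, scaleFactor=1.2)
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    kp_r, des_r = orb.detectAndCompute(reference_gray, None)
    kp_c, des_c = orb.detectAndCompute(candidate_gray, None)
    if des_r is None or des_c is None:
        return False  # no keypoints found in one of the images
    return len(bf.match(des_r, des_c)) >= MATCH_THRESHOLD
```

Cross-checking with `crossCheck=True` keeps only mutually-best matches, which is why a simple count threshold works as a crude classifier here.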
gingeralesy/.i3
https://github.com/gingeralesy/.i3
4f8664162d2607ebe90b33af9b902dd84b0bbd14
8281a37d8c8336ac53d25b24d77619cb737347a7
3c6bbfbaf9cc7be18e8679ad0ec105182fafde67
refs/heads/master
2021-01-18T13:02:20.166089
2017-02-02T13:59:03
2017-02-02T13:59:03
80,721,615
0
1
null
2017-02-02T12:11:09
2016-11-02T08:32:05
2017-01-04T20:25:50
null
[ { "alpha_fraction": 0.6883628964424133, "alphanum_fraction": 0.7001972198486328, "avg_line_length": 29.727272033691406, "blob_id": "bd058d208a2e27a175113fd7a011e87e39cd9c0e", "content_id": "ae40c2e490a5d3ff0b911ce099bc4a04fa559960", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1014, "license_type": "no_license", "max_line_length": 54, "num_lines": 33, "path": "/README.md", "repo_name": "gingeralesy/.i3", "src_encoding": "UTF-8", "text": "External dependencies: \n\n* `i3-wm`\n* `i3pystatus-git`\n* `python-netifaces`\n* `python-psutil`\n* `python-colour`\n* `dmenu-xft-mouse-height-fuzzy-history`\n* `nemo` (optional, good file manager)\n* `clipit` (optional, good clipboard manager)\n* `nitrogen` (optional, wallpaper)\n* `twmn-git` (optional, notifications)\n* `xbrightness` (optional, brightness media keys)\n* `pulseaudio` (otpional, volume media keys)\n\nKeyboard shortcuts overview (emacs notation):\n\n* `M-RET` open a terminal\n* `M-d` open `nemo`\n* `M-r` run the launcher\n* `M-f` toggle floating\n* `M-S-f` toggle fullscreen\n* `M-ESC` close the window\n* `M-TAB` cycle through windows on workspace\n* `M-S-h/v` split horizontally/vertically\n* `M-S-C-h/v` switch to horizontal/vertical split\n* `M-C-LEFT/RIGHT` switch workspaces\n* `M-C-0/1/2/3..` switch to specific workspace\n* `M-C-S-LEFT/RIGHT` move workspace to other monitor\n* `M-S-LEFT/RIGHT/UP/DOWN` move the window\n* `M-C-S-0/1/2/3...` move window to specific workspace\n* `M-C-c` reload i3\n* `M-C-q` quit i3\n" }, { "alpha_fraction": 0.48521357774734497, "alphanum_fraction": 0.5010952949523926, "avg_line_length": 34.11538314819336, "blob_id": "7a3cf92f4cebe347fc417011874d44e30ecd65e1", "content_id": "24f5ed46ce26244a86c501a3dc85f8b837fdd1e3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1830, "license_type": "no_license", "max_line_length": 120, "num_lines": 52, "path": "/status.py", "repo_name": "gingeralesy/.i3", "src_encoding": "UTF-8", "text": "from i3pystatus import Status\nimport os\nimport netifaces\n\nstatus = Status(standalone=True)\n\ndef format_label(label, text):\n return \"<span color=\\\"#FFDD00\\\">\"+label+\"</span> \"+text\n\nstatus.register(\"text\", text=\"\")\n\nstatus.register(\"clock\",\n format=format_label(\"DATE\",\"%a %-d %b %X\"),\n hints={\"markup\":\"pango\"})\n\nfor interface in ['wlp4s0','enp7s0']:\n if interface in netifaces.interfaces():\n status.register(\"network\",\n interface=interface,\n format_up=format_label(\"NET\",\"{v4}\"),\n format_down=\"NET DOWN\",\n color_up=\"#FFFFFF\",\n color_down=\"#FF0000\",\n hints={\"markup\":\"pango\"})\n\nfor power_supply in os.listdir(\"/sys/class/power_supply/\"):\n type_file = open(\"/sys/class/power_supply/\" + power_supply + \"/type\", \"r\")\n if type_file.readline().startswith(\"Battery\"):\n type_file.close()\n status.register(\"battery\",\n format=format_label(\"BAT\",\"{percentage:.2f}%{status} {remaining:%E%hh:%Mm} {consumption:.2f}W\"),\n alert=True,\n alert_percentage=5,\n status={\"DIS\":\"↓\", \"CHR\":\"↑\", \"FULL\":\"=\"},\n hints={\"markup\":\"pango\"})\n break\n type_file.close()\n\nstatus.register(\"mem\",\n format=format_label(\"MEM\",\"{percent_used_mem:02}%\"),\n warn_percentage=75,\n alert_percentage=90,\n color=\"#FFFFFF\",\n warn_color=\"#FFFF00\",\n alert_color=\"#FF0000\",\n hints={\"markup\":\"pango\"})\n\nstatus.register(\"cpu_usage\",\n format=format_label(\"CPU\",\"{usage:02}%\"),\n 
hints={\"markup\":\"pango\"})\n\nstatus.run()\n" } ]
2
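The `status.py` above registers some modules unconditionally and others only after probing the hardware (the network-interface loop and the `/sys/class/power_supply` scan). A hedged sketch of that probe-then-register pattern generalized into a helper follows; the `"disk"` module name and its `path`/`format` options are assumptions about i3pystatus, not verified against the pinned version.

```python
import os

def register_if_present(status, probe_path, module, **kwargs):
    # Only register a status module when the matching hardware/path exists,
    # so the same config works across machines.
    if os.path.exists(probe_path):
        status.register(module, **kwargs)

# e.g. (assuming an i3pystatus "disk" module exists):
# register_if_present(status, "/home", "disk", path="/home",
#                     format=format_label("HOME", "{avail:.1f}G"))
```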
mariapautz/Simulador-de-Fila
https://github.com/mariapautz/Simulador-de-Fila
34b8d641fa49070ead3c98a0852c09053ff290cb
f3df0658bdf693c137eb9a425b665c14e9eef7fd
e461ae4e4cca5703187043cec1292091d935a547
refs/heads/master
2020-06-15T08:28:38.389327
2019-07-04T13:40:40
2019-07-04T13:40:40
195,227,439
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5245416760444641, "alphanum_fraction": 0.5434653759002686, "avg_line_length": 28.654544830322266, "blob_id": "0a7cbbd1185db7c5339a3fa4d087b6c431b9718c", "content_id": "a69839b1b532532e86a4b1fdb82b510e7e85df60", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1712, "license_type": "no_license", "max_line_length": 115, "num_lines": 55, "path": "/trabalho_simulador_fila.py", "repo_name": "mariapautz/Simulador-de-Fila", "src_encoding": "UTF-8", "text": "import random\r\nlista_idoso = []\r\nlista_gestante = []\r\nlista_comum = []\r\ndef classificaçao():\r\n classificação = int(input(\"Insira 1 para idoso, 2 para gestante e 3 para pessoa comum: \"))\r\n if classificação == 1:\r\n a = random.randint(1,50)\r\n print (\"Sua senha é \" + str(a))\r\n lista_idoso.append(a)\r\n \r\n elif classificação == 2:\r\n b = random.randint(1,50)\r\n print (\"Sua senha é \" + str(b))\r\n lista_gestante.append(b)\r\n elif classificação == 3:\r\n c = random.randint(1,50)\r\n print (\"Sua senha é \" + str(c))\r\n lista_comum.append(c)\r\n else:\r\n print (\"Este comando não existe\")\r\ndef prox_senha():\r\n while len(lista_idoso) != 0:\r\n print (\"A senha da vez é \" + str(lista_idoso[0]))\r\n lista_idoso.pop(0)\r\n \r\n while len(lista_gestante) != 0:\r\n print (\"A senha da vez é \" + str(lista_gestante[0]))\r\n lista_gestante.pop(0)\r\n \r\n while len(lista_comum) != 0:\r\n print (\"A senha da vez é \" + str(lista_comum[0]))\r\n lista_comum.pop(0)\r\ndef mostrar():\r\n print (\"a lista de idosos é \" + str(lista_idoso))\r\n print (\"a lista de gestantes é \" + str(lista_gestante))\r\n print (\" a lista comum é \" + str(lista_comum))\r\n\r\n \r\nrodando = True\r\nwhile rodando:\r\n\r\n menu = int(input(\"Digite 1 para inserir; 2 para remover; 3 para mostrar as filas e 4 para sair do programa: \"))\r\n if menu == 1:\r\n classificaçao()\r\n elif menu == 2:\r\n prox_senha()\r\n \r\n elif menu == 3:\r\n mostrar()\r\n elif menu == 4:\r\n print (\"tenha um bom dia :)\")\r\n rodando = False\r\n else:\r\n print (\"este comando não existe!\")\r\n \r\n" } ]
1
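The simulator above drains every queue at once when option 2 is chosen. A minimal sketch (not in the repo) of serving one ticket at a time while keeping the same priority order (elderly > pregnant > regular) is shown below, using `collections.deque` for O(1) pops from the front; the names `queues` and `next_ticket` are illustrative.

```python
from collections import deque

queues = {"idoso": deque(), "gestante": deque(), "comum": deque()}

def next_ticket():
    # Serve the highest-priority non-empty queue, one ticket per call
    for category in ("idoso", "gestante", "comum"):
        if queues[category]:
            return queues[category].popleft()
    return None  # all queues are empty
```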
gugu/secret.ly
https://github.com/gugu/secret.ly
53f2f4a455063248a107685442a1ae772d206f47
3643ded11cedd89c3eebb27101fb5644875a0639
00579ecbb950e00a0e9dba0e217540e7c6268fbd
refs/heads/master
2021-07-01T13:55:03.452394
2016-03-20T20:20:04
2016-03-20T20:20:04
54,338,462
0
0
null
2016-03-20T20:22:16
2016-03-20T20:22:31
2021-06-11T13:57:11
Python
[ { "alpha_fraction": 0.8452380895614624, "alphanum_fraction": 0.8452380895614624, "avg_line_length": 9.5, "blob_id": "f959b71680365fb0be5e300fa22c9334030861d4", "content_id": "54a9522ea3ae8d730e9871003ee73e1749c474f5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 84, "license_type": "no_license", "max_line_length": 16, "num_lines": 8, "path": "/requirements.txt", "repo_name": "gugu/secret.ly", "src_encoding": "UTF-8", "text": "flask\nflask-cli\nclick\nsqlalchemy\nflask-log\nflask-alembic\nflask-sqlalchemy\nflask-wtf\n" }, { "alpha_fraction": 0.709466814994812, "alphanum_fraction": 0.7116430997848511, "avg_line_length": 42.761905670166016, "blob_id": "f4ba7e7f6de8728e838339bb968c9afce9eaa08d", "content_id": "8e9f24a91ad7ec5c67258165f00395e356e29744", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 919, "license_type": "no_license", "max_line_length": 93, "num_lines": 21, "path": "/secretly/forms/auth.py", "repo_name": "gugu/secret.ly", "src_encoding": "UTF-8", "text": "from flask_wtf import Form\nfrom wtforms import StringField, PasswordField\nfrom wtforms.validators import DataRequired, Email, ValidationError, EqualTo\nfrom ..models.user import User\n\nclass LoginForm(Form):\n email = StringField('Email', validators=[DataRequired(), Email()])\n password = PasswordField('Password', validators=[DataRequired()])\n\n def validate_email(self, email):\n if not User.query.filter_by(email=email.data).count():\n raise ValidationError('User not found')\n\nclass SignupForm(Form):\n email = StringField('Email', validators=[DataRequired(), Email()])\n password = PasswordField('Password', validators=[DataRequired(), EqualTo('password2')])\n password2 = PasswordField('Confirm Password', validators=[DataRequired()])\n\n def validate_email(self, email):\n if User.query.filter_by(email=email.data).count():\n raise ValidationError('User already exists')\n" }, { "alpha_fraction": 0.6792364716529846, "alphanum_fraction": 0.6806331276893616, "avg_line_length": 33.095237731933594, "blob_id": "bc50007d7e89a60159f88eedad7882d07cfe7bf1", "content_id": "380ad40d52825df7393a231ee3970fd08642d928", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2148, "license_type": "no_license", "max_line_length": 93, "num_lines": 63, "path": "/secretly/feed.py", "repo_name": "gugu/secret.ly", "src_encoding": "UTF-8", "text": "from flask import Blueprint, render_template, request, redirect, url_for\n\nfrom .models.feed import Record, Like, Comment, CommentLike\nfrom .db import db\nfrom .forms.feed import PostForm, CommentForm\n\nfeed = Blueprint('feed', __name__, template_folder='templates')\n\n\n@feed.route('/')\ndef index():\n return render_template('feed.html', feed=Record.query.all()[0:10])\n\n\n@feed.route('/like/<record_id>')\ndef like(record_id):\n if Like.query.filter_by(record_id=record_id, owner_id=request.user.id).count():\n return redirect('/')\n like = Like(record_id=record_id, owner_id=request.user.id)\n db.session.add(like)\n db.session.commit()\n return redirect('/')\n\n\n@feed.route('/comment_like/<comment_id>')\ndef comment_like(comment_id):\n comment = Comment.query.get(comment_id)\n redirect_url = url_for('.comments', record_id=comment.record.id)\n if CommentLike.query.filter_by(comment_id=comment_id, owner_id=request.user.id).count():\n return redirect(redirect_url)\n like = CommentLike(comment_id=comment_id, owner_id=request.user.id)\n 
db.session.add(like)\n db.session.commit()\n return redirect(redirect_url)\n\n\n@feed.route('/comments/<record_id>')\ndef comments(record_id):\n record = Record.query.get(record_id)\n comments = Comment.query.filter_by(record_id=record_id)\n return render_template('comments.html', comments=comments, record=record)\n\n\n@feed.route('/post', methods=['GET', 'POST'])\ndef post():\n form = PostForm()\n if form.validate_on_submit():\n record = Record(text=form.text.data, owner_id=request.user.id)\n db.session.add(record)\n db.session.commit()\n return redirect('/')\n return render_template('post.html', form=form)\n\n\n@feed.route('/new_comment/<record_id>', methods=['GET', 'POST'])\ndef new_comment(record_id):\n form = CommentForm()\n if form.validate_on_submit():\n comment = Comment(text=form.text.data, record_id=record_id, owner_id=request.user.id)\n db.session.add(comment)\n db.session.commit()\n return redirect(url_for('.comments', record_id=record_id))\n return render_template('new_comment.html', form=form)\n" }, { "alpha_fraction": 0.6251167058944702, "alphanum_fraction": 0.6451914310455322, "avg_line_length": 32.46875, "blob_id": "e40b2a9546e2a54719a27addb325862df6bc5308", "content_id": "f2b918012d0fd1da44efb0cfd4449aa02b741143", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2142, "license_type": "no_license", "max_line_length": 64, "num_lines": 64, "path": "/migrations/35b01228258_initial.py", "repo_name": "gugu/secret.ly", "src_encoding": "UTF-8", "text": "\"\"\"initial\n\nRevision ID: 35b01228258\nRevises: \nCreate Date: 2016-03-20 21:11:31.397885\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '35b01228258'\ndown_revision = None\nbranch_labels = ('default',)\ndepends_on = None\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.create_table('users',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(length=255), nullable=True),\n sa.Column('email', sa.String(length=255), nullable=True),\n sa.Column('password', sa.String(length=255), nullable=True),\n sa.Column('salt', sa.String(length=255), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('records',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('owner_id', sa.Integer(), nullable=True),\n sa.Column('text', sa.Text(), nullable=True),\n sa.Column('created_at', sa.DateTime(), nullable=True),\n sa.ForeignKeyConstraint(['owner_id'], ['users.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('comments',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('record_id', sa.Integer(), nullable=True),\n sa.Column('owner_id', sa.Integer(), nullable=True),\n sa.Column('text', sa.Text(), nullable=True),\n sa.ForeignKeyConstraint(['owner_id'], ['users.id'], ),\n sa.ForeignKeyConstraint(['record_id'], ['records.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('likes',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('record_id', sa.Integer(), nullable=True),\n sa.Column('owner_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['owner_id'], ['users.id'], ),\n sa.ForeignKeyConstraint(['record_id'], ['records.id'], ),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('record_id', 'owner_id')\n )\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n    op.drop_table('likes')\n    op.drop_table('comments')\n    op.drop_table('records')\n    op.drop_table('users')\n    ### end Alembic commands ###\n" }, { "alpha_fraction": 0.6583665609359741, "alphanum_fraction": 0.6593625545501709, "avg_line_length": 30.375, "blob_id": "7c86a7d6057f0582b8ae2350a9d7ee4db6d5720a", "content_id": "a8febfa3b79caace4797a5761e186d88f874cb38", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1004, "license_type": "no_license", "max_line_length": 95, "num_lines": 32, "path": "/secretly/auth.py", "repo_name": "gugu/secret.ly", "src_encoding": "UTF-8", "text": "from flask import Blueprint, render_template\nfrom .models.user import User\nfrom .forms.auth import LoginForm, SignupForm\nfrom flask import session, redirect\nfrom crypt import crypt\nfrom .db import db\nfrom .utils import random_string\n\nauth = Blueprint('auth', __name__, template_folder='templates')\n\n\n@auth.route('/login', methods=['GET', 'POST'])\ndef login():\n    form = LoginForm()\n    if form.validate_on_submit():\n        # Look up the existing user and verify the password before signing in\n        user = User.query.filter_by(email=form.email.data).first()\n        if user and user.password == crypt(form.password.data, user.salt):\n            session['user_id'] = user.id\n            return redirect('/')\n    return render_template('login.html', form=form)\n\n\n@auth.route('/signup', methods=['GET', 'POST'])\ndef signup():\n    form = SignupForm()\n    if form.validate_on_submit():\n        salt = random_string(8)\n        user = User(email=form.email.data, password=crypt(form.password.data, salt), salt=salt)\n        db.session.add(user)\n        db.session.commit()\n        session['user_id'] = user.id\n        return redirect('/')\n    return render_template('signup.html', form=form)\n" }, { "alpha_fraction": 0.6127703189849854, "alphanum_fraction": 0.6683831214904785, "avg_line_length": 25.97222137451172, "blob_id": "9e153c7cc974d516aac65fc662997242d3f3a10c", "content_id": "c097bcbd28c28f563edb34069706dc4cb04a0f23", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 971, "license_type": "no_license", "max_line_length": 63, "num_lines": 36, "path": "/migrations/143a6c43a0c_comment_like.py", "repo_name": "gugu/secret.ly", "src_encoding": "UTF-8", "text": "\"\"\"comment_like\n\nRevision ID: 143a6c43a0c\nRevises: 35b01228258\nCreate Date: 2016-03-20 22:13:32.702509\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '143a6c43a0c'\ndown_revision = '35b01228258'\nbranch_labels = ()\ndepends_on = None\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n    ### commands auto generated by Alembic - please adjust! ###\n    op.create_table('comment_likes',\n    sa.Column('id', sa.Integer(), nullable=False),\n    sa.Column('comment_id', sa.Integer(), nullable=True),\n    sa.Column('owner_id', sa.Integer(), nullable=True),\n    sa.ForeignKeyConstraint(['comment_id'], ['comments.id'], ),\n    sa.ForeignKeyConstraint(['owner_id'], ['users.id'], ),\n    sa.PrimaryKeyConstraint('id'),\n    sa.UniqueConstraint('comment_id', 'owner_id')\n    )\n    ### end Alembic commands ###\n\n\ndef downgrade():\n    ### commands auto generated by Alembic - please adjust! 
###\n    op.drop_table('comment_likes')\n    ### end Alembic commands ###\n" }, { "alpha_fraction": 0.7334024906158447, "alphanum_fraction": 0.7334024906158447, "avg_line_length": 25.77777862548828, "blob_id": "8c7bab121553d98fce211d201e146f5053cd7ec8", "content_id": "ea75eca1e51bde8873cbaaac60bca3be8fa99fa7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 964, "license_type": "no_license", "max_line_length": 64, "num_lines": 36, "path": "/app.py", "repo_name": "gugu/secret.ly", "src_encoding": "UTF-8", "text": "from flask import Flask, session, request\nfrom flask_log import Logging\nfrom flask_cli import FlaskCLI\nfrom flask_alembic.cli.click import cli as alembic_cli\nfrom flask_alembic import Alembic\n\nfrom .secretly.db import db\nfrom .secretly.feed import feed\nfrom .secretly.auth import auth\n\n\napp = Flask(__name__)\napp.config['DEBUG'] = True\napp.config['FLASK_LOG_LEVEL'] = 'DEBUG'\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////tmp/test.db'\napp.config['SQLALCHEMY_ECHO'] = True\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.config['SECRET_KEY'] = 'eps-Oyb-Toj-fI'\n\nFlaskCLI(app)\nflask_log = Logging(app)\ndb.init_app(app)\napp.cli.add_command(alembic_cli, 'db')\n\nalembic = Alembic()\nalembic.init_app(app)\n\napp.register_blueprint(auth, url_prefix='/auth')\napp.register_blueprint(feed)\n\n\n@app.before_request\ndef before_request():\n    from .secretly.models.user import User\n    # Default to no user so views can rely on the attribute existing\n    request.user = None\n    if session.get('user_id'):\n        request.user = User.query.get(session['user_id'])\n" }, { "alpha_fraction": 0.8181818127632141, "alphanum_fraction": 0.8181818127632141, "avg_line_length": 28.33333396911621, "blob_id": "b5af156b02744b05959bd36475366f9a0c2d3118", "content_id": "53be0853b3fdfd5dc1e00b83b2feec154bb1634e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 88, "license_type": "no_license", "max_line_length": 39, "num_lines": 3, "path": "/secretly/db.py", "repo_name": "gugu/secret.ly", "src_encoding": "UTF-8", "text": "from flask_sqlalchemy import SQLAlchemy\nfrom flask import current_app\ndb = SQLAlchemy()\n" }, { "alpha_fraction": 0.6225165724754333, "alphanum_fraction": 0.6456953883171082, "avg_line_length": 29.200000762939453, "blob_id": "2116c3d2d9d32a77f7e8ac3fd882fe6415257d29", "content_id": "a29634ce5e5661d9cf284cfe7c2ecff9042883f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 604, "license_type": "no_license", "max_line_length": 78, "num_lines": 20, "path": "/secretly/models/user.py", "repo_name": "gugu/secret.ly", "src_encoding": "UTF-8", "text": "from sqlalchemy import Column, Integer, String\nfrom ..db import db\nimport hashlib\n\n\nclass User(db.Model):\n    __tablename__ = 'users'\n    id = Column(Integer, primary_key=True)\n    name = Column(String(255))\n    email = Column(String(255))\n    password = Column(String(255))\n    salt = Column(String(255))\n    comments = db.relationship('Comment', backref='owner')\n    likes = db.relationship('Like', backref='owner')\n\n    @property\n    def avatar(self):\n        m = hashlib.md5()\n        m.update(self.salt.encode('utf-8'))\n        return 'http://www.gravatar.com/avatar/%s?d=monsterid' % m.hexdigest()\n" }, { "alpha_fraction": 0.6576852202415466, "alphanum_fraction": 0.658297598361969, "avg_line_length": 29.811321258544922, "blob_id": "1d44331636c5be5a281af03c99dc8623701c6551", "content_id": "60eff22b43ed50d82a27ee31ff0c9758dcda3537", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 1633, "license_type": "no_license", "max_line_length": 84, "num_lines": 53, "path": "/secretly/models/feed.py", "repo_name": "gugu/secret.ly", "src_encoding": "UTF-8", "text": "from sqlalchemy import Column, Integer, ForeignKey, Text, DateTime, UniqueConstraint\nfrom ..db import db\nimport datetime\n\n\nclass Record(db.Model):\n __tablename__ = 'records'\n id = Column(Integer, primary_key=True)\n owner_id = Column(ForeignKey('users.id'))\n text = Column(Text)\n created_at = Column(DateTime, default=datetime.datetime.now())\n\n comments = db.relationship('Comment', backref='record')\n likes = db.relationship('Like', backref='record')\n\n @property\n def like_count(self):\n return Like.query.filter_by(record_id=self.id).count()\n\n @property\n def comment_count(self):\n return Comment.query.filter_by(record_id=self.id).count() or 0\n\n\nclass Comment(db.Model):\n __tablename__ = 'comments'\n id = Column(Integer, primary_key=True)\n record_id = Column(ForeignKey('records.id'))\n owner_id = Column(ForeignKey('users.id'))\n text = Column(Text)\n likes = db.relationship('CommentLike', backref='comment')\n\n @property\n def like_count(self):\n return CommentLike.query.filter_by(comment_id=self.id).count()\n\n\nclass Like(db.Model):\n __tablename__ = 'likes'\n __table_args__ = (\n UniqueConstraint('record_id', 'owner_id'),)\n id = Column(Integer, primary_key=True)\n record_id = Column(ForeignKey('records.id'))\n owner_id = Column(ForeignKey('users.id'))\n\n\nclass CommentLike(db.Model):\n __tablename__ = 'comment_likes'\n __table_args__ = (\n UniqueConstraint('comment_id', 'owner_id'),)\n id = Column(Integer, primary_key=True)\n comment_id = Column(ForeignKey('comments.id'))\n owner_id = Column(ForeignKey('users.id'))\n" } ]
10
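The `like()` view in this repo's `feed.py` uses a count-then-insert check, which leaves a race window between the SELECT and the INSERT; the `Like` table's `UniqueConstraint` already guards against duplicates at the database level. A hedged sketch of the alternative, relying on the constraint and catching the resulting `IntegrityError`, follows; the helper name `add_like` is illustrative.

```python
from sqlalchemy.exc import IntegrityError

def add_like(db, Like, record_id, owner_id):
    # Insert unconditionally; the UniqueConstraint on (record_id, owner_id)
    # rejects duplicates, closing the check-then-insert race.
    db.session.add(Like(record_id=record_id, owner_id=owner_id))
    try:
        db.session.commit()
    except IntegrityError:
        db.session.rollback()  # already liked; treat as a no-op
```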
hGl0/Dynamics-of-the-climate-System-1
https://github.com/hGl0/Dynamics-of-the-climate-System-1
bd6e0244925da1c041972582c8d64f843c26a14c
c40081efa4582a6b24b3da634cee54b8b557ea7f
96f7ae1127e02bc19f0d4745f75e9a6793a865b4
refs/heads/master
2023-04-18T18:33:04.993426
2021-05-05T13:15:04
2021-05-05T13:15:04
347,983,275
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.49370503425598145, "alphanum_fraction": 0.5899280309677124, "avg_line_length": 28.945945739746094, "blob_id": "ab06c5e2828bee58c0906cd632fafccd9002f194", "content_id": "1968afe2fe3dc58de6a4ddc70209adbaa7fca33e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1114, "license_type": "no_license", "max_line_length": 67, "num_lines": 37, "path": "/myles.py", "repo_name": "hGl0/Dynamics-of-the-climate-System-1", "src_encoding": "UTF-8", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nT0 = 0 # Ausgangstemp\ntau1 = 30 # in Jahren\nT2xco2 = 3.2 # wird sich angenähert\nC0 = 280 # first value\nT = [T0]\n\ndf = pd.read_csv(\"rcp26.csv\")\nco = df['CO2']\n\ndef myles(T, tau, T2xco2, co, C0, df):\n for i in range(0, len(df)-1):\n temp = T[i]+((1/tau)*T2xco2*np.log2(co[i]/C0)-(1/tau)*T[i])\n T.append(temp)\n return T\n\nT1 = myles([T0], 10, T2xco2, co, C0, df)\nT2 = myles([T0], 20, T2xco2, co, C0, df)\nT3 = myles([T0], 30, T2xco2, co, C0, df)\nT5 = myles([T0], 50, T2xco2, co, C0, df)\nT10 = myles([T0], 100, T2xco2, co, C0, df)\nT20 = myles([T0], 200, T2xco2, co, C0, df)\n#Plottet Daten\nplt.plot(df['YEARS'], T1, label=\"Tau = 10\")\nplt.plot(df['YEARS'], T2, label=\"Tau = 20\")\nplt.plot(df['YEARS'], T3, label=\"Tau = 30\")\nplt.plot(df['YEARS'], T5, label=\"Tau = 50\")\nplt.plot(df['YEARS'], T10, label=\"Tau = 100\")\nplt.plot(df['YEARS'], T20, label=\"Tau = 200\")\nplt.xlabel(\"Time [Jahre]\")\nplt.ylabel(\"Temperature [°C]\")\nplt.legend()\nplt.title(\"CO2 Data from RCP 2.6, C_0=280, T_2xco2 = 3.2\")\nplt.show()\n\n\n\n\n" }, { "alpha_fraction": 0.47751322388648987, "alphanum_fraction": 0.570105791091919, "avg_line_length": 24.211111068725586, "blob_id": "efddcfd8189cc77028e089456e6dca8be6cfa1be", "content_id": "96aee2ed407fe07f229a85eb5248558beae585fb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2269, "license_type": "no_license", "max_line_length": 124, "num_lines": 90, "path": "/myles_icealb.py", "repo_name": "hGl0/Dynamics-of-the-climate-System-1", "src_encoding": "UTF-8", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nrcp26 = pd.read_csv(\"rcp26.csv\")\nrcp45 = pd.read_csv('rcp45.csv')\nrcp60 = pd.read_csv('rcp60.csv')\nrcp85 = pd.read_csv('rcp85.csv')\n\nco = rcp26['CO2']\nT_init = 15\nT0 = 0 # Ausgangstemp\ntau = 30 # in Jahren\ntau2 = 1000\nT2xco2 = 3.2 # wird sich angenähert\nC0 = 280 # first value\nT = [T0]\nh = 0.1\nI0 = 340\nbolz = 5.67 * 10**(-8)\n\ndef ocean(T):\n integral = 0\n for i in range(0, len(T)-1):\n integral = (integral + (T[i]-T[i-1])/np.sqrt((len(T)-1-i)))\n return integral\n\ndef ice_albedo(T, t):\n Ts = (2/bolz*(1-alpha(T))*(np.sin(t*0.001)+1)*I0)**(1/4)-273.1\n return Ts\n\n# alpha operates on global temperature => +T_init\n# linear\ndef alpha(Temp):\n Temp += T_init\n if Temp >= -10 and Temp <= 20:\n return -1/60*Temp+8/15\n if Temp < -10:\n return 0.7\n if Temp > 20:\n return 0.2\n\n\ndef myles(T, co, h, a):\n for i in range(0, len(co)-1):\n if T[i] <= 0:\n tau2 = 10000\n if T[i] > 0:\n tau2 = 1000\n temp = T[i]+((1/tau)*T2xco2*np.log2((co[i])/C0)-(1/tau)*T[i])-h*ocean(T) + a*(1/tau2)*(ice_albedo(T[i], i) - Ts_ref)\n T.append(temp)\n return T\n\nTs_ref = 29.69\n\ndef glacial_carbon(E):\n co2 = [E]\n for i in range(0, 15000):\n co2.append(co2[i]+0.75*2-1/200*(co2[i])-h*ocean(co2))\n return co2\n\ndef carbon_rcp(E):\n co2 = [E]\n for i in range(0, 735):\n 
co2.append(co2[i]+(0.75*(co[i+1]-co[i])*10-1/200*(co2[i])-h*ocean(co2)))\n    return co2\n\n\n#c_glac = glacial_carbon(C0)\nc_rcp = carbon_rcp(C0)\n\nno_ocean = myles([T0], c_rcp, 0, 0)\nocean_only = myles([T0], c_rcp, h, 0)\nocean_ice = myles([T0], c_rcp, h, 1)\nocean_ice_co = myles([T0], c_rcp, h, 1)\nocean_ice_corcp = myles([T0], c_rcp, h, 1)\n\n#glacial = myles([T0], c_glac, h, 1)\n\n\n#plt.plot(rcp26.YEARS[:700], no_ocean[:700], color=\"blue\")\n#plt.plot(rcp26.YEARS[:700], ocean_only[:700], color=\"orange\")\n#plt.plot(rcp26.YEARS[:700], ocean_ice[:700], color=\"green\")#\n#plt.plot(rcp26.YEARS[:700], ocean_ice_co[:700], color=\"red\")\n#plt.plot(rcp26.YEARS[:700], ocean_ice_corcp[:700], color=\"red\")\n#plt.plot(glacial)\n#plt.plot(c, color=\"orange\")\n#plt.plot(c_rcp, color=\"blue\")\n# plt.plot(a, color=\"green\")\nplt.show()" }, { "alpha_fraction": 0.5022233128547668, "alphanum_fraction": 0.5385375618934631, "avg_line_length": 22.53333282470703, "blob_id": "c9537722b663c6ac7455aef07d27bc24399bd3a8", "content_id": "66c1ad0b42520ea4a18946d240b8a113e8c2ffce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4049, "license_type": "no_license", "max_line_length": 97, "num_lines": 165, "path": "/GlacialModel.py", "repo_name": "hGl0/Dynamics-of-the-climate-System-1", "src_encoding": "UTF-8", "text": "import numpy as np\r\nimport matplotlib.pylab as plt\r\nimport cmath as mth\r\nimport random as rd\r\nimport pandas as pd\r\n\r\nI0 = 1350 # W/m2 solar insolation\r\npi = np.pi\r\nTabs0 = 273.15 # K\r\nsigma = 5.67e-8 # Stefan-Boltzmann constant\r\nbb_efficiency = 0.6 # black body efficiency\r\n\r\nKc = 3 # K climate sensitivity\r\nC0 = 280 # ppm\r\nalpha0 = 0.30581537 # 0.3\r\nalpha_cold = 0.6\r\nalpha_warm = 0.3\r\nTalpha_treshhold = Tabs0+11 # C\r\nTalpha_scale = 2 # K\r\n\r\ntaus = 1 # yr\r\ntauc = 25 # yrs\r\ntaudo = 200 # yrs\r\n\r\nhT = 1/np.sqrt(100) # 1/sqrt(yrs)\r\nhE = 1/1000 # h emissions, temperature feedback\r\n\r\n\r\neps_aa = 0.1\r\neps_ml = 0.5\r\neps_do = 0.25\r\n\r\n# numerical constant\r\ndt = 1 # yr\r\n\r\n# Function definitions\r\ndef Tsolar(alpha,t):\r\n    val = (1-alpha)*((0.3*np.sin(t*0.01))+1.2)*I0/(4*sigma*bb_efficiency)\r\n    val = pow(val,0.25)\r\n    return val\r\n\r\ndef Tco2(C):\r\n    val = Kc*np.log(C/C0)/np.log(2)\r\n    return val\r\n\r\ndef alpha(T):\r\n    val = alpha_cold + (alpha_warm - alpha_cold)/2*(1+np.tanh((T-Talpha_treshhold)/Talpha_scale))\r\n    return val \r\n\r\ndef Ocean_uptake(Tc):\r\n    ocean_uptake = 0\r\n    t = len(Tc)-1\r\n    # start at 1 so Tc[t_dash-1] does not wrap around to the last element\r\n    for t_dash in range(1,t):\r\n        ocean_uptake = ocean_uptake+(Tc[t_dash]-Tc[t_dash-1])/np.sqrt(t-t_dash)\r\n    return(ocean_uptake)\r\n\r\n\r\ndef C_ocean_uptake(Cml):\r\n    ocean_uptake = 0\r\n    t = len(Cml)-1\r\n    # start at 1 so Cml[t_dash-1] does not wrap around to the last element\r\n    for t_dash in range(1,t):\r\n        ocean_uptake = ocean_uptake+(Cml[t_dash]-Cml[t_dash-1])/np.sqrt(t-t_dash)\r\n    return(ocean_uptake) \r\n\r\n# Emission time series\r\nEmissions = np.zeros([1500, 1])\r\n#Emissions[10] = 100\r\n\r\n#use rcp2.6 emissions for sanity check\r\n# =============================================================================\r\n#rcp26 = pd.read_csv('C:\\\\Users\\\\stecheme\\\\Documents\\\\RCP\\\\rcp26.csv') \r\n#C_rcp = rcp26['CO2']\r\n#Emissions = np.diff(C_rcp)\r\n# =============================================================================\r\ndef emissions(Emissions_in,T):\r\n    E_new = Emissions_in+hE*(T-Tabs0)\r\n    return(E_new)\r\n    \r\n\r\n# =============================================================================\r\n# print(Tsolar(0.0))\r\n# 
print(Tco2(140))\r\n# print(len(Emissions))\r\n# \r\n# =============================================================================\r\n# variables\r\nTs = [] # surface temperature set by solar insulation\r\nTc = [] # surface temperature set by \r\nT = []\r\nTCelcius = []\r\n\r\nCaa = [] # C atmospheric accumulation\r\nCml = [] # C mixed layer\r\nCdo = [] # C deep ocean\r\nC = [] # C \r\nalp = [] # alpha\r\n\r\n# initialization\r\nTs_old = Tsolar(alpha0,0)\r\nTc_old = 0.0\r\nT_old = Ts_old + Tc_old\r\nalpha_old = alpha(T_old)\r\n\r\nCaa_old = 280.0 #C3\r\nCml_old = 0.0 #C1\r\nCdo_old = 0.0 #C2\r\nC_old = Caa_old + Cml_old + Cdo_old\r\n\r\nfor t in range(1,len(Emissions)):\r\n E = emissions(Emissions[t],T_old)\r\n \r\n Ts_new = Ts_old + dt * (Tsolar(alpha_old,t) - Ts_old/taus)\r\n Tc_new = Tc_old + dt * (Tco2(C_old)/tauc - Tc_old/tauc-hT*Ocean_uptake(Tc))\r\n T_new = Ts_new + Tc_new\r\n Caa_new = Caa_old + dt * (eps_aa*E)\r\n Cml_new = Cml_old + dt * (eps_ml*E-hT*C_ocean_uptake(Cml))\r\n Cdo_new = Cdo_old + dt * (eps_do*E - Cdo_old/taudo)\r\n C_new = Caa_new + Cml_new + Cdo_new\r\n\r\n alpha_new = alpha(T_new)\r\n \r\n # save\r\n Ts.append(Ts_new)\r\n Tc.append(Tc_new)\r\n T.append(T_new)\r\n TCelcius.append(T_new-Tabs0)\r\n Caa.append(Caa_new)\r\n Cml.append(Cml_new)\r\n Cdo.append(Cdo_new)\r\n C.append(C_new)\r\n alp.append(alpha_new)\r\n \r\n # update\r\n Ts_old = Ts_new\r\n Tc_old = Tc_new\r\n T_old = T_new\r\n Caa_old = Caa_new\r\n Cml_old = Cml_new\r\n Cdo_old = Cdo_new\r\n C_old = C_new\r\n alpha_old = alpha_new\r\n \r\nplt.figure()\r\nplt.plot(C)\r\n\r\nplt.figure()\r\nplt.plot(Ts)\r\n\r\nplt.figure()\r\nplt.plot(TCelcius)\r\nplt.xlabel(\"Years\")\r\nplt.ylabel(\"Temperature (°C)\")\r\nplt.title(\"Temperature change dependent on periodic I0\")\r\n\r\nplt.figure()\r\nplt.plot(alp)\r\n\r\nprint(alp[-1])\r\n\r\n#atest = []\r\n#Tint = np.linspace(-10,30,400)\r\n#for i in range(len(Tint)):\r\n# val = alpha(Tabs0+Tint[i])\r\n# print(i,Tint[i],val)\r\n# atest.append(val)\r\n" } ]
3
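The `myles()` update used throughout this repo is a first-order relaxation, T_{i+1} = T_i + (1/tau)*(T2xco2*log2(C_i/C0) - T_i), so with CO2 held constant the temperature relaxes toward the fixed point T* = T2xco2*log2(C/C0); for a doubling (C = 2*C0) that is exactly the climate sensitivity of 3.2. A short standalone check (not in the repo) is shown below.

```python
import numpy as np

tau, T2x, C0, C = 30.0, 3.2, 280.0, 560.0  # doubling of CO2
T = 0.0
for _ in range(2000):  # ~66 time constants, well past equilibration
    T += (1.0 / tau) * (T2x * np.log2(C / C0) - T)
print(round(T, 6))  # -> 3.2, the equilibrium climate sensitivity
```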
Vivivale6/ViviamAcuna_hw13
https://github.com/Vivivale6/ViviamAcuna_hw13
74df9918670e0a15770db2fe87c1e0153f40f48f
960d5e75cdaa02c6ba7aeedbe6d6c9ff879718c3
fc8caaef306eb386983c856d4d904a6603e4bd39
refs/heads/master
2020-03-15T01:29:46.371167
2018-05-02T20:29:24
2018-05-02T20:29:24
131,895,061
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5985307097434998, "alphanum_fraction": 0.6080380082130432, "avg_line_length": 21.25, "blob_id": "66bd6b3334a7d445c5d39d88fe03b4b3a2802d50", "content_id": "b8e5b294016e9f7ab378a20fe95c6cedca56a8a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2314, "license_type": "no_license", "max_line_length": 123, "num_lines": 104, "path": "/VA_MontaHall.py", "repo_name": "Vivivale6/ViviamAcuna_hw13", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom random import shuffle\n\n#Retorna un orden aleatorio de la lista\ndef sort_doors():\n lista = [\"goat\",\"goat\", \"car\"]\n \n np.random.shuffle(lista)\n \n return lista\n\nb=sort_doors()\n#Retorna un numero aleatorio entre 0,1,2\ndef choose_door():\n num=[0,1,2]\n\n a= np.random.choice(num)\n \n return a\n\nc=choose_door()\n#Funcion que retorna en que posicion se encuentra una cabra\n\ndef reveal_door(lista,choice):\n \n for i in range(len(lista)):\n \n if ( i!=choice)and(lista[i]==\"goat\"):\n \n lista[i]=\"GOAT_MONTY\"\n \n return lista\n\n#Funcion que finaliza el juego y relaciona si el jugador desea cambiar o no de puerta \ndef finish_game(lista,choice,change):\n \n if (change == False):\n \n return lista[choice]\n\n else :\n #Se tiene el condicional en el caso de que el jugador quiera cambiarse de puerta\n for i in range(len(lista)):\n \n if( i!=choice)and(lista[i]!=\"GOAT_MONTY\"):\n \n return lista[i]\n\n \n \nprint b,c\nprint reveal_door(b,c)\nprint finish_game(b,c,True)\n\n#Simulacion para diferentes escenarios del juego\nlis_true=[]\nfor i in range (99):\n\n #Numero de puerta\n a=sort_doors()\n #puerta que elige \n c=choose_door()\n #Se revela la puerta\n d=reveal_door(a,c)\n \n b=finish_game(d,c,True)\n lis_true.append(b)\n\nlis_false=[]\nfor i in range (99):\n \n #Numero de puerta\n a=sort_doors()\n #puerta que elige \n c=choose_door()\n #Se revela la puerta\n d=reveal_door(a,c)\n \n b=finish_game(d,c,False)\n lis_false.append(b)\n \nprint \"lista de True\",lis_true\nprint \"lista de false\",lis_false\n\n\n#Se busca saber cual es la probabilidad de ganar un carro cambiando de puerta o no \nwin_true=0.0\nwin_false=0.0\n\nfor i in range (len(lis_true)):\n #Probabilidad de ganar cambiando de puerta\n if (lis_true[i]==\"car\"):\n win_true+=1.0\n\nfor i in range (len(lis_false)):\n #Probabilidad de ganar sin cambiar de puerta\n if (lis_false[i]==\"car\"):\n win_false+=1.0\n \n \nprob1= win_true/len(lis_true)\nprob2=win_false/len(lis_false)\n\nprint \"La probabilidad de ganar cambiando de puerta es :\", prob1,\"La probabilidad de ganar sin cambiar de puerta es:\",prob2\n" } ]
1
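The simulation above estimates the Monty Hall probabilities empirically. A compact cross-check (not in the repo) follows: since Monty always reveals a goat, switching wins exactly when the first pick was a goat, so P(win | switch) = 2/3 and P(win | stay) = 1/3, and the reveal step does not even need to be simulated.

```python
import random

def play(switch, trials=100000):
    wins = 0
    for _ in range(trials):
        car = random.randrange(3)
        pick = random.randrange(3)
        # Switching wins iff the first pick missed the car
        wins += (pick != car) if switch else (pick == car)
    return wins / float(trials)

print(play(True), play(False))  # ~0.667 vs ~0.333
```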
sreejithdikru/Theblog
https://github.com/sreejithdikru/Theblog
b7eb8b4e4eb471ef0d6f2d9cf68584fa34d700f6
f0379fd2556466222900e2792829de0db445fc0c
8f44b0e9c3c46de2cd335ba04391da18874d1368
refs/heads/main
2023-07-01T03:27:00.442308
2021-07-31T08:53:59
2021-07-31T08:53:59
367,970,046
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5531167984008789, "alphanum_fraction": 0.5531167984008789, "avg_line_length": 33.48484802246094, "blob_id": "d824d625c83dc3a4bff01a6cbff5ff232094bcd5", "content_id": "a54462b3d8ea708ab2c4f1fb9facc8c1a6de1a85", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1139, "license_type": "no_license", "max_line_length": 121, "num_lines": 33, "path": "/theblog/forms.py", "repo_name": "sreejithdikru/Theblog", "src_encoding": "UTF-8", "text": "from django import forms\nfrom django.forms import fields, models, widgets\nfrom .models import *\n\n\nclass PostForm(forms.ModelForm):\n class Meta:\n model = Post\n fields = ('title', 'title_tag', 'author', 'body','upload_img')\n\n widgets = {\n\n 'title' : forms.TextInput(attrs= {'class': 'form-control'}),\n 'title_tag' : forms.TextInput(attrs= {'class': 'form-control'}),\n 'author' : forms.TextInput(attrs= {'class': 'form-control', 'value' : '', 'id':'writer', 'type' : 'hidden'}),\n #'author' : forms.Select(attrs= {'class': 'form-control'}),\n 'body' : forms.Textarea(attrs= {'class': 'form-control'}),\n 'upload_img' : forms.FileInput(attrs= {'class': 'form-control'}),\n\n }\n\nclass EditForm(forms.ModelForm):\n class Meta:\n model = Post\n fields = ('title', 'title_tag', 'body')\n\n widgets = {\n\n 'title' : forms.TextInput(attrs= {'class': 'form-control'}),\n 'title_tag' : forms.TextInput(attrs= {'class': 'form-control'}),\n 'body' : forms.Textarea(attrs= {'class': 'form-control'}),\n\n }\n\n" }, { "alpha_fraction": 0.5489022135734558, "alphanum_fraction": 0.6147704720497131, "avg_line_length": 24.049999237060547, "blob_id": "d6654eedaa346ca457ff3fe753f53a129a940df1", "content_id": "8a102f63e11b18cccd57190688b8fa6462f323ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 501, "license_type": "no_license", "max_line_length": 100, "num_lines": 20, "path": "/theblog/migrations/0007_post_upload_img.py", "repo_name": "sreejithdikru/Theblog", "src_encoding": "UTF-8", "text": "# Generated by Django 3.2.4 on 2021-06-16 19:23\n\nfrom django.db import migrations, models\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('theblog', '0006_auto_20210522_2152'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='post',\n name='upload_img',\n field=models.ImageField(default=django.utils.timezone.now, max_length=50, upload_to=''),\n preserve_default=False,\n ),\n ]\n" }, { "alpha_fraction": 0.7746478915214539, "alphanum_fraction": 0.7746478915214539, "avg_line_length": 34.25, "blob_id": "36136df515d94531f9ddf5c620a3fd8570c7ce85", "content_id": "8d2309a3b743a264a51fdf3dd6279e8e6837da98", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 142, "license_type": "no_license", "max_line_length": 82, "num_lines": 4, "path": "/README.md", "repo_name": "sreejithdikru/Theblog", "src_encoding": "UTF-8", "text": "# Theblog\nmy first blog and this a scalable one.\nFeature:\nuserauthentictaion,add post(text and image files), option to like a post and more.\n\n" }, { "alpha_fraction": 0.6700767278671265, "alphanum_fraction": 0.6700767278671265, "avg_line_length": 38.150001525878906, "blob_id": "a78ea503dbad04f3a70b9448fdd240c36a8f9e66", "content_id": "ba739f56122b972e50b678682666cb622245d757", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 782, "license_type": "no_license", 
"max_line_length": 91, "num_lines": 20, "path": "/theblog/urls.py", "repo_name": "sreejithdikru/Theblog", "src_encoding": "UTF-8", "text": "#from . import views\nfrom django.urls import path\nfrom .views import AddPostView, HomeView, ArticleDetailView, UpdatePostView, DeletePostView\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\n\nurlpatterns = [\n \n #path('', views.home, name=\"home\"),\n path('', HomeView.as_view() , name=\"home\"),\n path('article/<int:pk>', ArticleDetailView.as_view(), name=\"article-detail\"),\n path('add_post/', AddPostView.as_view(), name = 'add_post'),\n path('article/edit_post/<int:pk>', UpdatePostView.as_view(), name= 'edit_post'),\n path('article/<int:pk>/delete', DeletePostView.as_view(), name= 'delete_post'),\n]\n\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL,\n document_root=settings.MEDIA_ROOT)" }, { "alpha_fraction": 0.7052947282791138, "alphanum_fraction": 0.7052947282791138, "avg_line_length": 28.47058868408203, "blob_id": "af4f4392c208b09b700b0c45ac65516eb4c8b655", "content_id": "e4e59ff08ceedda77ad589eefebfb2ae75ef4a11", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1001, "license_type": "no_license", "max_line_length": 89, "num_lines": 34, "path": "/theblog/views.py", "repo_name": "sreejithdikru/Theblog", "src_encoding": "UTF-8", "text": "from django.shortcuts import render, redirect\nfrom django.urls.base import reverse_lazy\nfrom django.views.generic import ListView, DetailView, CreateView, UpdateView, DeleteView\nfrom django.views.generic.edit import DeleteView\nfrom .models import Post\nfrom .forms import EditForm, PostForm\nfrom django.urls import reverse_lazy\n#def home(request):\n # return render(request, 'home.html', {})\nclass HomeView(ListView):\n model = Post\n template_name = 'home.html'\n ordering = ['-post_date']\n\nclass ArticleDetailView(DetailView):\n model = Post\n template_name = 'article_details.html'\n\nclass AddPostView(CreateView):\n model = Post\n form_class = PostForm\n template_name = 'add_post.html'\n #fields = '__all__'\n \n \nclass UpdatePostView(UpdateView):\n model = Post\n form_class = EditForm\n template_name = 'update_post.html'\n\nclass DeletePostView(DeleteView):\n model = Post\n template_name = 'delete_post.html'\n success_url = reverse_lazy('home')" } ]
5
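This repo's `PostForm` passes the author through a hidden form input, which a client could tamper with. A hedged sketch of a common alternative, setting the author server-side in the `CreateView` instead, follows; it assumes a variant of the form that excludes the `author` field, and the mixin usage is a suggestion rather than the repo's actual code.

```python
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic import CreateView

from .models import Post  # as in the repo's theblog app


class AddPostView(LoginRequiredMixin, CreateView):
    model = Post
    template_name = 'add_post.html'
    fields = ('title', 'title_tag', 'body', 'upload_img')  # no 'author'

    def form_valid(self, form):
        # Set the trusted author from the session, not from client input
        form.instance.author = self.request.user
        return super().form_valid(form)
```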
ivanoel/invasao-alien-em-python
https://github.com/ivanoel/invasao-alien-em-python
8d93ff5f6c3667620e3e091f187d9d3d480f5311
73cee1d67cf0c4c58ab1a2f86473e55f4480e8dc
3bb6a21d5babdc5d4155990bcd43bd1b15acea58
refs/heads/master
2023-03-22T13:50:12.431612
2021-02-28T01:47:08
2021-02-28T01:47:08
173,152,956
0
0
null
2019-02-28T17:05:37
2021-02-25T18:07:43
2021-02-28T01:46:28
Python
[ { "alpha_fraction": 0.707317054271698, "alphanum_fraction": 0.707317054271698, "avg_line_length": 19.5, "blob_id": "6b59b9b56383f69d297869a1e4ea64c8d321e1ea", "content_id": "1d35dfad09c7d59faa17128ff4669ac7f4d12f24", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 83, "license_type": "permissive", "max_line_length": 50, "num_lines": 4, "path": "/espaco.py", "repo_name": "ivanoel/invasao-alien-em-python", "src_encoding": "UTF-8", "text": "import pygame\n\nclass Espaço():\n image = pygame.image.load(\"images/espaco.bmp\")\n" }, { "alpha_fraction": 0.6280596852302551, "alphanum_fraction": 0.6292537450790405, "avg_line_length": 31.038461685180664, "blob_id": "e239822469298b901b2955390ead1d7aec94cd81", "content_id": "7240292f59f59d15584e5d8edb5c72246956e5d6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1691, "license_type": "permissive", "max_line_length": 81, "num_lines": 52, "path": "/nave.py", "repo_name": "ivanoel/invasao-alien-em-python", "src_encoding": "UTF-8", "text": "import pygame\nfrom pygame.sprite import Sprite\n\nclass Nave(Sprite):\n\n def __init__(self, config, screen):\n \"\"\" Inicializa a espaçonave e define sua posição inicial. \"\"\"\n super(Nave, self).__init__()\n self.screen = screen\n self.config = config\n\n # Carrega a imagem da espaçonave e obtem seu rect\n\n self.image = pygame.image.load(\"images/nave-4.png\")\n self.rect = self.image.get_rect()\n self.screen_rect = screen.get_rect()\n\n\n # Inicializa cada nova espaçonave naparte inferior central da tela\n\n self.rect.centerx = self.screen_rect.centerx\n self.rect.bottom=self.screen_rect.bottom\n\n # Armazenar um valor decimal parao centro da espaçonave\n self.center = float(self.rect.centerx)\n\n\n # Flag de movimento\n self.mover_direita = False\n self.mover_esquerda = False\n\n\n def update(self):\n \"\"\"Atualiza a posição da espaçonave de acordo com a flag de movimento.\"\"\"\n # Atualiza o valor do centro da espaçonave, e não o retângulo\n if self.mover_direita and self.rect.right < self.screen_rect.right:\n self.center += self.config.nave_speed_factor \n if self.mover_esquerda and self.rect.left > 0:\n self.center -= self.config.nave_speed_factor\n\n #Atualizaro objeto rect de acordo com self.center\n self.rect.centerx = self.center\n \n def blitme(self):\n \"\"\" Desenha a espaçonave em sua posição atual. \"\"\"\n\n self.screen.blit(self.image,self.rect)\n\n\n def center_nave(self):\n # Centraliza a espaçonave na tela.\n self.center = self.screen_rect.centerx\n \n" }, { "alpha_fraction": 0.5790049433708191, "alphanum_fraction": 0.605248749256134, "avg_line_length": 30.44827651977539, "blob_id": "7af0119c4c23570651a17ecbea9d0915a721ef28", "content_id": "add60c807cb4848da548b77845a392a3fd6fb5ee", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1849, "license_type": "permissive", "max_line_length": 84, "num_lines": 58, "path": "/settings.py", "repo_name": "ivanoel/invasao-alien-em-python", "src_encoding": "UTF-8", "text": "class Settings():\n \"\"\" Uma Classe para armazenar todas as configurações da Invasão Alienigenas. 
\"\"\"\n\n \n def __init__(self): \n \n \"\"\" Inicializa as configurações do jogo \"\"\"\n \n # Configurações da tela\n self.screen_width = 800\n self.screen_height = 600\n self.bg_color = (230, 230, 230)\n\n # Configurações da espaçonave\n self.nave_speed_factor = 1.5\n self.nave_limit = 3\n\n # Configurações dos projeteis\n self.bala_speed_factor = 2\n self.bala_width = 2\n self.bala_height = 15\n self.bala_color = 60, 60, 60\n self.balas_allowed= 5\n\n #Configurações dos alieniginas\n self.alien_speed_factor = 1\n self.fleet_drop_speed = 10\n # fleet_direction igual a 1 representa a direita; -1 representa a esquerda\n self.fleet_direction = 1\n\n\n # A taxa com que a velocidade do jogo aumenta\n self.speedup_scale = 1.1\n\n # A taxa com que os pontos para cada alienigena aumentam\n self.score_scale = 1.5\n\n self.initialize_dynamic_settings()\n\n def initialize_dynamic_settings(self):\n \"\"\" Inicializa as configurações que mudam no decorrer do jogo. \"\"\"\n self.nave_speed_factor = 1.5\n self.bala_speed_factor = 3\n self.alien_speed_factor = 1\n\n # fleet_direction igual a 1 representa a direita; -1 representa a esquerda\n self.fleet_direction = 1\n\n # Pontuação\n self.alien_points = 10\n\n def increase_speed(self):\n \"\"\"Aumenta as configurações de velocidade. \"\"\"\n self.nave_speed_factor *= self.speedup_scale\n self.bala_speed_factor *= self.speedup_scale\n self.alien_speed_factor *= self.speedup_scale\n\n self.alien_points = int(self.alien_points * self.score_scale)\n \n" }, { "alpha_fraction": 0.6128798723220825, "alphanum_fraction": 0.6136034727096558, "avg_line_length": 28.84782600402832, "blob_id": "1f800205eb3a9b4bd88ea11a0c53d75c49c96d3c", "content_id": "c3682c043cdb1105f530826adb52e6d24add1d55", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1388, "license_type": "permissive", "max_line_length": 80, "num_lines": 46, "path": "/alien.py", "repo_name": "ivanoel/invasao-alien-em-python", "src_encoding": "UTF-8", "text": "import pygame\nfrom pygame.sprite import Sprite\n\n\nclass Alien(Sprite):\n \"\"\" Uma classe que representa um unico alienigena da frota. \"\"\"\n\n\n def __init__(self, config, screen):\n \"\"\" Inicializa o alienigena e define sua posição inicial. \"\"\"\n\n super(Alien,self).__init__()\n self.screen = screen\n self.config = config\n\n\n # Carrega a imagem do alienigena e define seu atributo rect\n self.image = pygame.image.load('images/alienigena.png')\n self.rect = self.image.get_rect()\n\n\n # Inicia cada novo alienigena proximo a parte superior esquerda da tela.\n self.rect.x = self.rect.width\n self.rect.y = self.rect.height\n\n\n # Armazena a posiçãoexta doalienigena.\n self.x = float(self.rect.x)\n\n def blitme(self):\n \"\"\" Desenha o alienigena em sua posição atual. \"\"\"\n self.screen.blit(self.image,self.rect)\n\n\n def check_edges(self):\n \"\"\" Devolve True se o alieniginas estiver na borda da tela. \"\"\"\n screen_rect = self.screen.get_rect()\n if self.rect.right >= screen_rect.right:\n return True\n elif self.rect.left <= 0:\n return True\n\n def update(self):\n \"\"\" Move o alienigina para a direita ou para esquerda. 
\"\"\"\n self.x += (self.config.alien_speed_factor * self.config.fleet_direction)\n self.rect.x = self.x\n \n" }, { "alpha_fraction": 0.6363934278488159, "alphanum_fraction": 0.6379234790802002, "avg_line_length": 35.02362060546875, "blob_id": "476075b9024a12c7f96e90af2665797f7a33fe2b", "content_id": "964feee226dfdfa9f8f7f96d9f14cc8fc76cf01f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9193, "license_type": "permissive", "max_line_length": 108, "num_lines": 254, "path": "/game_functions.py", "repo_name": "ivanoel/invasao-alien-em-python", "src_encoding": "UTF-8", "text": "import sys\nfrom time import sleep\nimport pygame\nfrom bala import Bala\nfrom alien import Alien\n\n \ndef check_keydown_events(event, config, screen, nave, balas):\n if event.key == pygame.K_RIGHT:\n # move a espaçonave para a direita\n nave.mover_direita = True\n \n # move a espaçonave para a esquerda\n elif event.key == pygame.K_LEFT:\n nave.mover_esquerda = True\n \n elif event.key == pygame.K_UP:\n nave.mover_pracima = True\n \n elif event.key == pygame.K_DOWN:\n nave.mover_prabaixo = True\n \n elif event.key == pygame.K_z:\n fogo_bala(config, screen, nave, balas)\n\n elif event.key == pygame.K_q:\n sys.exit()\n\n \ndef fogo_bala(config, screen, nave, balas):\n \"\"\" Dispara um projetil se o limite ainda nao foi alcançado. \"\"\"\n # Cria um novo projetil e o adicionaao grupo d projeteis.\n # Cria um novo projetil e o adiciona ao grupo de projeteis.\n if len(balas) < config.balas_allowed:\n nova_bala = Bala(config, screen, nave)\n balas.add(nova_bala)\n \ndef check_keyup_events(event, nave):\n #Respode a soltura das teclas\n if event.key == pygame.K_RIGHT:\n nave.mover_direita = False\n elif event.key == pygame.K_LEFT:\n nave.mover_esquerda = False \n elif event.key == pygame.K_UP:\n nave.mover_pracima = False \n elif event.key == pygame.K_DOWN:\n nave.mover_prabaixo = False\n\ndef check_events(config, screen, stats, sb, play_button, nave, aliens, balas):\n \"\"\" Responde a eventos de pressionamento de teclas e de mouse. 
\"\"\" \n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n check_keydown_events(event, config,screen, nave, balas)\n elif event.type == pygame.KEYUP:\n check_keyup_events(event,nave)\n \n elif event.type == pygame.MOUSEBUTTONDOWN:\n mouse_x, mouse_y = pygame.mouse.get_pos()\n check_play_button(config, screen, stats, sb, play_button, nave, aliens, balas, mouse_x, mouse_y)\n\ndef check_play_button(config, screen, stats, sb, play_button, nave, aliens, balas, mouse_x, mouse_y):\n \"\"\" Inicia um novojogo quando ojogador clicar em Jogar.\"\"\"\n button_clicked = play_button.rect.collidepoint(mouse_x, mouse_y)\n if button_clicked and not stats.game_ativo:\n\n # Reinicia as configurações do jogo\n config.initialize_dynamic_settings()\n \n # Oculta o cursor do mouse\n pygame.mouse.set_visible(False)\n \n # Reinicia os dados estatisiticos do jogo\n stats.reset_stats()\n stats.game_ativo = True\n\n # Reinicia as imagens do painel de pontuação\n sb.prep_score()\n sb.prep_high_score()\n sb.prep_level()\n sb.prep_naves()\n\n # esvazia a lista de alienigenas e de projeteis\n aliens.empty()\n balas.empty()\n\n # Cria uma nova frota e centraliza a espoçonave \n create_fleet(config, screen, nave, aliens)\n nave.center_nave()\n \ndef update_balas(config, screen, stats, sb, nave, aliens, balas):\n \"\"\" Atualiza a posição dos projeteis e se livra dos projeteis antigos.\"\"\"\n # Atualiza as posições dos projeteis.\n balas.update()\n\n # Verifica se algum projetil atingiu os alienigenas\n # Em caso afirmativo, livra-se do projetil e do alienigenas.\n # Livra-se dos projeteis que desapareceram\n for bala in balas.copy():\n if bala.rect.bottom <= 0:\n balas.remove(bala)\n\n check_bala_alien_colisao(config, screen, stats, sb, nave, aliens, balas)\n\ndef check_bala_alien_colisao(config, screen, stats, sb, nave, aliens, balas):\n \"\"\" Responde a colisoes entre protejeis e alienigenas. \"\"\"\n # Remove qualquer projetil e alienigenas quetenha colidido.\n colisao = pygame.sprite.groupcollide(balas, aliens, True, True)\n\n if colisao:\n for aliens in colisao.values():\n stats.score += config.alien_points * len(aliens)\n sb.prep_score()\n check_high_score(stats, sb)\n \n if len(aliens) == 0:\n # Se afrotatodafor destruida, inica um novo nivel\n balas.empty()\n config.increase_speed()\n\n # Aumenta o nivel\n stats.level += 1\n sb.prep_level()\n\n \n create_fleet(config, screen, nave, aliens)\n \ndef check_high_score(stats, sb):\n \"\"\"Verifica se há uma nova pontuação máxima. 
\"\"\"\n if stats.score > stats.high_score:\n stats.high_score = stats.score\n sb.prep_high_score()\n \ndef get_numero_aliens_x(config, alien_width):\n \"\"\"Determina o numero de alienigenas que cabem em uma linha, \"\"\"\n valor_espaco_x = config.screen_width - 1 * alien_width\n numero_aliens_x = int(valor_espaco_x / (2 * alien_width))\n return numero_aliens_x\n\ndef get_numero_rows(config, nave_height, alien_height):\n \"\"\"Determina o numero de alienigenas que cabem em uma linha, \"\"\"\n valor_espaco_y = (config.screen_height - (3 * alien_height) - nave_height)\n numero_rows = int(valor_espaco_y / (2 * alien_height))\n return numero_rows\n\ndef create_alien(config, screen, aliens, alien_numero, row_numero):\n #Cria um alienigena e posiciona na linha\n alien = Alien(config, screen)\n alien_width = alien.rect.width\n alien.x = alien_width + 2 * alien_width * alien_numero\n alien.rect.x = alien.x\n alien.rect.y = alien.rect.height + 2 * alien.rect.height * row_numero\n aliens.add(alien)\n\n\ndef create_fleet(config, screen, nave, aliens):\n \"\"\"Cria uma frota completa de alienigenas. \"\"\"\n # Cria um alienigenas e calcula o numero de alienigenas em uma linha.\n # O espaçamento entreos alienigenas e igual a largura de um alienigena.\n alien = Alien(config,screen)\n numero_aliens_x = get_numero_aliens_x(config, alien.rect.width)\n numero_rows = get_numero_rows(config, nave.rect.height, alien.rect.height)\n \n # Cria a frota de alienigenas.\n for row_numero in range(numero_rows):\n for alien_numero in range(numero_aliens_x):\n create_alien(config, screen, aliens, alien_numero, row_numero)\n\n#def update_aliens(aliens):\n \"\"\" Atualiza as posições de todos os alieniginas da frota.\"\"\"\ndef check_fleet_edges(config, aliens):\n \"\"\" Responde apropriadamente se algum alienigenas alcançou uma borda.\"\"\"\n for alien in aliens.sprites():\n if alien.check_edges():\n change_fleet_direction(config, aliens)\n break\n\ndef change_fleet_direction(config, aliens):\n \"\"\" Faz toda a frota descer e muda sua direção \"\"\"\n for alien in aliens.sprites():\n alien.rect.y += config.fleet_drop_speed\n config.fleet_direction *= -1\n\ndef check_aliens_bottom(config, screen, stats, sb, nave, aliens, balas):\n #Verifica se algum alien alcançou a parte infeiror da tela\n screen_rect = screen.get_rect()\n for alien in aliens.sprites():\n if alien.rect.bottom >= screen_rect.bottom:\n # Trata esse caso do mesmo que é feitoquando a espaçonave é atingida\n nave_atingida(config, screen, stats, sb, nave, aliens, balas)\n break\n\ndef update_aliens(config, screen, stats, sb, nave, aliens, balas):\n \"\"\"\n Verifica se a frota está em uma das bordas\n e então atualiza as posições de todos os alienigenas da frota. 
\n \"\"\"\n check_fleet_edges(config, aliens) \n aliens.update()\n\n # Verifica se houve colisões entre alienigenas e a espaçonave.\n if pygame.sprite.spritecollideany(nave, aliens):\n nave_atingida(config, screen, stats, sb, nave, aliens, balas)\n\n\n # Verifca se há algum alienque atingiu aparteinfeior da tela\n check_aliens_bottom(config, screen, stats, sb, nave, aliens, balas)\n\n \ndef nave_atingida(config, screen, stats, sb, nave, aliens, balas):\n # Responde ao fato de a espoçonave ter sido atingida por um alienigena.\n if stats.naves_left > 0:\n # Decrementa nave_left\n stats.naves_left -= 1\n\n # Atualiza o painel de pontuações\n sb.prep_naves()\n \n # esvazia a lista de alienigenas e de projeteis\n aliens.empty()\n balas.empty()\n\n # Cria uma nova frota e centraliza a espoçonave \n create_fleet(config, screen, nave, aliens)\n nave.center_nave()\n\n # Faz uma pausa\n sleep(0.5)\n\n else:\n stats.game_ativo = False\n pygame.mouse.set_visible(True)\n\n \ndef update_screen(config, screen, stats, sb, nave, aliens, bullets, play_button):\n \"\"\"Atualiza as imagens na tela e alterna para nova tela. \"\"\"\n screen.fill(config.bg_color) # Obs: Erro ao exibir disparos\n \n # Redesenha todos os projeteis atras da espaçonave e dos alienigenas.\n for bala in bullets.sprites():\n bala.draw_bala() \n nave.blitme()\n aliens.draw(screen)\n\n #Desenha a informação sobre a pontuação\n sb.show_score()\n\n #Desenha o botão Play seo jogo estiver inativo\n if not stats.game_ativo:\n play_button.draw_button()\n \n #Deixa a tela mais recente visivel\n pygame.display.flip()\n" }, { "alpha_fraction": 0.663001298904419, "alphanum_fraction": 0.6688227653503418, "avg_line_length": 29.920000076293945, "blob_id": "ebc402dec4636bf24c5f2367cf1087b9bb0291c5", "content_id": "3739eac1730c538c07f88bc132487598d5be7aab", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1554, "license_type": "permissive", "max_line_length": 97, "num_lines": 50, "path": "/alien_invasion.py", "repo_name": "ivanoel/invasao-alien-em-python", "src_encoding": "UTF-8", "text": "#import sys\nimport pygame\nfrom pygame.sprite import Group\nfrom settings import Settings\nfrom game_stats import GameStats\nfrom scoreboard import Scoreboard\nfrom button import Button\nfrom nave import Nave\n#from alien import Alien\nimport game_functions as gf\n\ndef inicio_game():\n # inicializa o jogo, as configurações e o objeto screen\n pygame.init()\n config = Settings()\n screen = pygame.display.set_mode(\n (config.screen_width, config.screen_height)\n )\n pygame.display.set_caption(\"Alien Invasion\")\n\n # Cria botão Play\n play_button = Button(config, screen, \"Jogar\")\n \n #Cria uma nova instância para armazenar dados estatisticos do jogo e cria painel de pontuação\n stats= GameStats(config)\n sb = Scoreboard(config, screen, stats)\n \n # Define a cor de fundo\n # bg_color = (230, 230,230)\n \n # Cria uma espaçonave, um grupo de projeteis e um grupo de alienigenas.\n nave = Nave(config, screen)\n balas = Group()\n aliens = Group()\n\n # Cria frota de alienigenas.\n gf.create_fleet(config, screen,nave, aliens)\n\n # Inicia o laço principal do jogo\n while True:\n gf.check_events(config, screen, stats, sb, play_button, nave, aliens, balas)\n \n if stats.game_ativo:\n nave.update()\n gf.update_balas(config, screen, stats,sb, nave, aliens, balas)\n gf.update_aliens(config, screen, stats, sb, nave, aliens, balas)\n \n gf.update_screen(config, screen, stats, sb, nave, aliens, balas, 
play_button)\n\ninicio_game()\n" }, { "alpha_fraction": 0.6038035154342651, "alphanum_fraction": 0.6085578203201294, "avg_line_length": 26.909090042114258, "blob_id": "a9cb2b14e471594dcc0c88ea2ca9317414eea2b7", "content_id": "03dc80fb106f9ca8436239c6fdb9b037a4273080", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 637, "license_type": "permissive", "max_line_length": 73, "num_lines": 22, "path": "/game_stats.py", "repo_name": "ivanoel/invasao-alien-em-python", "src_encoding": "UTF-8", "text": "import pygame\n\nclass GameStats():\n \"\"\" Armazena dados estatisticos da Invasão Alienigenas. \"\"\"\n\n\n def __init__(self, config):\n # Inicialiaza os dados estatisticos\n self.config = config\n self.reset_stats()\n\n # inicia a invasão alienigenas em um estado ativo\n self.game_ativo = False\n\n # A pontuação máxima jamais deverá ser reiniciada\n self.high_score = 0\n \n def reset_stats(self):\n # Inicialiaza os dados estatisticos quepodem mudar durante o jogo\n self.nave_left = self.config.nave_limit\n self.score = 0\n self.level = 1\n\n\n\n \n \n" } ]
num_files: 7
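The `increase_speed`/`speedup_scale` pair in the settings file above compounds geometrically once per cleared fleet; equivalently, as a minimal sketch (`speed_after` is an illustrative name, not from the repo):

```python
def speed_after(levels, base=1.5, scale=1.1):
    # increase_speed() multiplies each factor by speedup_scale once per
    # cleared fleet, so after n fleets the factor is base * scale ** n
    return base * scale ** levels

assert abs(speed_after(2) - 1.5 * 1.21) < 1e-9  # two fleets: 1.5 -> 1.815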
repo_name: ShahadIshraq/pyTests
repo_url: https://github.com/ShahadIshraq/pyTests
snapshot_id: d898f604c4e074e78c57cf78cdb6c1cda426dff7
revision_id: 45d568f9077b14f34b84b825cb6ae437baa88bff
directory_id: 5b5eec62e40e9afcb8e2fb66284bbd526b812b15
branch_name: refs/heads/master
visit_date: 2021-09-07T08:38:11.950107
revision_date: 2018-02-20T12:26:56
committer_date: 2018-02-20T12:26:56
github_id: 111,699,315
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
[ { "alpha_fraction": 0.5303030014038086, "alphanum_fraction": 0.559440553188324, "avg_line_length": 18.522727966308594, "blob_id": "f2a216a06417fa00b55406967bfd80e990c778e6", "content_id": "6e9bd0e7e8bb54e3a88b9609ec3dea509f4e5444", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 858, "license_type": "no_license", "max_line_length": 65, "num_lines": 44, "path": "/power consumption/integration.py", "repo_name": "ShahadIshraq/pyTests", "src_encoding": "UTF-8", "text": "import numpy as np\nimport scipy.integrate as intg\n\nfrom os import walk\nimport re\n\nt = 1.22e-3\n\n\n\nmypath = input(\"The path : \")\noutput_file = open(mypath+\"/result\", 'w')\nf = []\nfor (dirpath, dirnames, filenames) in walk(mypath):\n f.extend(filenames)\n break\n\nprint(filenames)\n\nfor filename in filenames:\n match = re.match(r'slice[0-9]+', filename)\n if match is None:\n continue\n print(filename)\n f = open(mypath+\"/\"+filename, 'r')\n y = []\n\n c = 0\n for line in f:\n c = c + 1\n line = line.split()\n if len(line) < 1:\n continue\n line = line[0]\n if line is None:\n continue\n value = float(line)\n y.append(value)\n\n f.close()\n x = np.arange(0.0, len(y)*1.22 - 0.2, 1.22)\n output_file.write(str(5.0*intg.simps(y, x)/len(y)*1.22)+\"\\n\")\n\noutput_file.close()" }, { "alpha_fraction": 0.5316165685653687, "alphanum_fraction": 0.5605150461196899, "avg_line_length": 25.89230728149414, "blob_id": "2403162d5363825a5b4d225581c82e25cc8cd786", "content_id": "99c27319d3178290252d4093681eba30bbaa1d14", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3495, "license_type": "no_license", "max_line_length": 63, "num_lines": 130, "path": "/SAP/bin writing.py", "repo_name": "ShahadIshraq/pyTests", "src_encoding": "UTF-8", "text": "import openpyxl as op\n\n# reading the excel file\nimport os\n\nfileName = 'State.xlsx'\nwb = op.load_workbook(fileName)\nsheet = wb.get_sheet_by_name(\"Sheet1\")\n\nprint(\"Number of columns to parse: \"+str(sheet.max_column-5-5))\nprint(\"Number of rows to parse: \"+str(sheet.max_row-2))\n\nfor r in range(3, sheet.max_row+1):\n print(\"****************\")\n print(\"In row : \" + str(r))\n print(\"****************\")\n temp = 0\n pos = 7\n for c in range(6, 14):\n content = sheet.cell(row=r, column=c).value\n if content is not None:\n value = int(content)\n if value == 1:\n temp = temp | 1 << pos\n pos = pos - 1\n sheet.cell(row=r, column=c+1).value = format(temp, '02X')\n print(\"Bin1 = \"+sheet.cell(row=r, column=c+1).value)\n temp = 0\n pos = 7\n for c in range(15, 23):\n content = sheet.cell(row=r, column=c).value\n if content is not None:\n value = int(content)\n if value == 1:\n temp = temp | 1 << pos\n pos = pos - 1\n sheet.cell(row=r, column=c+1).value = format(temp, '02X')\n print(\"Bin2 = \"+sheet.cell(row=r, column=c+1).value)\n\n temp = 0\n pos = 7\n for c in range(24, 32):\n content = sheet.cell(row=r, column=c).value\n if content is not None:\n value = int(content)\n if value == 1:\n temp = temp | 1 << pos\n pos = pos - 1\n sheet.cell(row=r, column=c+1).value = format(temp, '02X')\n print(\"Bin3 = \"+sheet.cell(row=r, column=c+1).value)\n\n temp = 0\n pos = 7\n for c in range(33, 41):\n content = sheet.cell(row=r, column=c).value\n if content is not None:\n value = int(content)\n if value == 1:\n temp = temp | 1 << pos\n pos = pos - 1\n sheet.cell(row=r, column=c+1).value = format(temp, '02X')\n print(\"Bin4 = \"+sheet.cell(row=r, column=c+1).value)\n\n temp = 
0\n pos = 7\n for c in range(42, 50):\n content = sheet.cell(row=r, column=c).value\n if content is not None:\n value = int(content)\n if value == 1:\n temp = temp | 1 << pos\n pos = pos - 1\n sheet.cell(row=r, column=c+1).value = format(temp, '02X')\n print(\"Bin5 = \"+sheet.cell(row=r, column=c+1).value)\n print()\n\nwb.save(\"changedState.xlsx\")\nwb = op.load_workbook(\"changedState.xlsx\")\nsheet = wb.get_sheet_by_name(\"Sheet1\")\n\n\ndirectory = \"./bins/\"\nif not os.path.exists(directory):\n os.makedirs(directory)\n\nc = 14\nfile = open(directory+\"bin1.bin\", 'w')\ny = []\nfor r in range(3, sheet.max_row+1):\n content = sheet.cell(row=r, column=c).value + \"\\n\"\n y.append(content)\nfile.writelines(y)\nfile.close()\n\nc = 23\nfile = open(directory+\"bin2.bin\", 'w')\ny = []\nfor r in range(3, sheet.max_row+1):\n content = sheet.cell(row=r, column=c).value + \"\\n\"\n y.append(content)\nfile.writelines(y)\nfile.close()\n\nc = 32\nfile = open(directory+\"bin3.bin\", 'w')\ny = []\nfor r in range(3, sheet.max_row+1):\n content = sheet.cell(row=r, column=c).value + \"\\n\"\n y.append(content)\nfile.writelines(y)\nfile.close()\n\n\nc = 41\nfile = open(directory+\"bin4.bin\", 'w')\ny = []\nfor r in range(3, sheet.max_row+1):\n content = sheet.cell(row=r, column=c).value + \"\\n\"\n y.append(content)\nfile.writelines(y)\nfile.close()\n\nc = 50\nfile = open(directory+\"bin5.bin\", 'w')\ny = []\nfor r in range(3, sheet.max_row+1):\n content = sheet.cell(row=r, column=c).value + \"\\n\"\n y.append(content)\nfile.writelines(y)\nfile.close()" }, { "alpha_fraction": 0.6074106693267822, "alphanum_fraction": 0.6215262413024902, "avg_line_length": 30.054794311523438, "blob_id": "f2940d2f1925cb9e9aee759c4243a3b7110c010a", "content_id": "e699bcc34a7f966ed3d913c6c9cf21f27207fb3f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2267, "license_type": "no_license", "max_line_length": 84, "num_lines": 73, "path": "/Mail/send.py", "repo_name": "ShahadIshraq/pyTests", "src_encoding": "UTF-8", "text": "import smtplib\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nimport openpyxl as op\nfrom string import Template\n\ncount = 0\n\n# creating the mail body\nf = open(\"mail.txt\", \"r\")\nmailBody = f.read()\nf.close()\ns = Template(mailBody)\n# print(s.substitute(name='Shahad Ishraq', cName='Intro to arduino'))\n\n# reading the excel file\nwb = op.load_workbook('excel.xlsx')\nsheet = wb.get_active_sheet();\n\n# setting up the server\nfromaddr = \"teaminterplanetar@gmail.com\"\nserver = smtplib.SMTP('smtp.gmail.com', 587)\nserver.starttls()\nserver.login(fromaddr, \"interpl@netar\")\nprint(\"Logged in\")\n\nfor r in range (56, 57):\n # cell = sheet.cell(row=r , column=3).value\n name = sheet.cell(row=r, column=2).value\n name = name.title()\n toaddr = sheet.cell(row=r, column=4).value\n courses = sheet.cell(row=r, column=6).value\n courses = courses.split(', ')\n if 'CAD' in courses:\n courses.remove('CAD')\n if 'SolidWorks' in courses:\n courses.remove('SolidWorks')\n if len(courses) == 0:\n continue\n # creating the message\n msg = MIMEMultipart()\n msg['From'] = fromaddr\n msg['To'] = toaddr\n msg['Subject'] = \"Orientation class for the course \" + courses[0]\n if courses[0] == 'Introduction to programming with C' :\n body = s.substitute(name=name, cName=courses[0], time='10')\n else:\n body = s.substitute(name=name, cName=courses[0], time='11')\n\n count += 1\n msg.attach(MIMEText(body, 'plain'))\n text = 
msg.as_string()\n server.sendmail(fromaddr, toaddr, text)\n print('To: '+name+' Address: '+toaddr+' Subject: '+msg['Subject'])\n\n if len(courses) == 2 :\n msg = MIMEMultipart()\n msg['From'] = fromaddr\n msg['To'] = toaddr\n msg['Subject'] = \"Orientation class for the course \" + courses[1]\n if courses[1] == 'Introduction to programming with C':\n body = s.substitute(name=name, cName=courses[1], time='10')\n else:\n body = s.substitute(name=name, cName=courses[1], time='11')\n msg.attach(MIMEText(body, 'plain'))\n text = msg.as_string()\n server.sendmail(fromaddr, toaddr, text)\n print('To: ' + name + ' Address: ' + toaddr + ' Subject: ' + msg['Subject'])\n count += 1\n\n\nserver.quit()\nprint(count)\n" }, { "alpha_fraction": 0.6166666746139526, "alphanum_fraction": 0.6499999761581421, "avg_line_length": 29.5, "blob_id": "998aff537ff6f95771809c13658092fbfcade750", "content_id": "bfab491e2f2117f45293186f01588e4d2a31deaa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 60, "license_type": "no_license", "max_line_length": 39, "num_lines": 2, "path": "/SAP/uni test.py", "repo_name": "ShahadIshraq/pyTests", "src_encoding": "UTF-8", "text": "value = int(input())\nprint(\"Changed: \"+format(value, '02X'))" }, { "alpha_fraction": 0.5517241358757019, "alphanum_fraction": 0.5517241358757019, "avg_line_length": 14, "blob_id": "9e0191202f4e1ffae594cfe855e4f687420c52d9", "content_id": "7044f746b736d6b3f53b2d10c7c787fd9c9f4dde", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 29, "license_type": "no_license", "max_line_length": 16, "num_lines": 2, "path": "/Mail/stringTest.py", "repo_name": "ShahadIshraq/pyTests", "src_encoding": "UTF-8", "text": "s = \"SHAHAD ISHRAQ\"\nprint(s.title())" }, { "alpha_fraction": 0.8142856955528259, "alphanum_fraction": 0.8142856955528259, "avg_line_length": 34, "blob_id": "eb02e59fa685f0dec941a86b3d359a625a995329", "content_id": "96845385eb1ad05f7db467b216a789ad7ad87390", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 70, "license_type": "no_license", "max_line_length": 59, "num_lines": 2, "path": "/README.md", "repo_name": "ShahadIshraq/pyTests", "src_encoding": "UTF-8", "text": "# pyTests\nThis repository holds the tests and small works with python\n" }, { "alpha_fraction": 0.7401315569877625, "alphanum_fraction": 0.7478070259094238, "avg_line_length": 37, "blob_id": "df5cf8d63476a328673e7994bbf7490711f5a84c", "content_id": "ac19c19fa210698bdd055366d9cb21018825a0e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1824, "license_type": "no_license", "max_line_length": 156, "num_lines": 48, "path": "/power consumption/README.md", "repo_name": "ShahadIshraq/pyTests", "src_encoding": "UTF-8", "text": "# Plotter , Slicer and Intergrator\n\nThe codes in this folder can be used to plot , slice and integrate.\n* plot.py : plots given file\n* scraping.py : slices the specified windows and outputs to different folder\n* integration.py : Finds the average power of all the outputs generated by scraping.py in the given directory and dumps the results in a file.\n\n## plot.py\n\nTo use this code run\n```\npython3 plot.py\n```\nWhen the program runs, it will ask for the path to the file to plot.\n**The scepicified file must only contain the *y* values. 
There must be only one value per line.**\n\nUsing this program, find out a threshold which can be used to identify the start of an rx or tx.<br>\nFor example: the normal values oscilate between 20-25 and when there is an rx or tx, it jumps suddenly to more than 28. So, 28 can be used as threshold.<br>\nAlso find out the approximate span or width of a window from the plot. <br>**Keep a safety margin of say 5**<br>\nFor example : The rx_data file can be used with a span of 50.\n\n*The output graph from plot.py can be zoomed. This will help in finding the threshold and span.*\n\n## scraping.py\n\nTo use this code run\n```\npython3 scraping.py\n```\nWhen the program runs, it will ask for \n* The path to the file to slice.\n* The threshold (discussed above)\n* The span (discussed above)\n\n**The scepicified file must only contain the *y* values. There must be only one value per line.**\n\nThe outputs will be generated in a new folder.\n\n## integration.py\n\nTo use this code run\n```\npython3 integration.py\n```\nWhen the program runs, it will ask for the path to the directory that holds the outputs of the previous program (scraping).\nIt will generate the results and dump them in the same directory into a file named **result**\n\nThis file will contain the average power in miliwat for all the slices.\n" }, { "alpha_fraction": 0.5759717226028442, "alphanum_fraction": 0.583038866519928, "avg_line_length": 13.100000381469727, "blob_id": "704f8083acc4d3417a2d461d49fd2e6e83923258", "content_id": "4eb79c6e9e7f15aed394a6f241cbd340a611b460", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 283, "license_type": "no_license", "max_line_length": 36, "num_lines": 20, "path": "/power consumption/plot.py", "repo_name": "ShahadIshraq/pyTests", "src_encoding": "UTF-8", "text": "from matplotlib import pyplot as plt\n\n\n\ns = input(\"file path: \")\nf = open(s)\ny = []\n\nfor line in f:\n line = line.split()\n if len(line) > 0:\n number = float(line[0])\n y.append(number)\nprint(len(y))\nf.close()\n\nplt.title(s)\nplt.grid(b=True)\nplt.plot(y)\nplt.show()\n\n" }, { "alpha_fraction": 0.6483253836631775, "alphanum_fraction": 0.660287082195282, "avg_line_length": 40.70000076293945, "blob_id": "2693ceedcb1e451d08638e618112c689452148ba", "content_id": "94483952f8bc5fa7c5ac1e6d3ee3edb779aeea66", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 418, "license_type": "no_license", "max_line_length": 197, "num_lines": 10, "path": "/Mail/excelTest.py", "repo_name": "ShahadIshraq/pyTests", "src_encoding": "UTF-8", "text": "import openpyxl as op\n\nwb = op.load_workbook('excel.xlsx')\n\nsheet = wb.get_active_sheet();\n\nfor r in range (2,57) :\n cell = sheet.cell(row=r , column=3).value\n if (cell == 'Buet' or cell == 'BUET' or cell == 'buet' or cell == 'Bangladesh University of Engineering and Technology [BUET]' or cell == 'Bangladesh University of Engineering and Technology'):\n print(sheet.cell(row=r , column=2).value + '\\n')\n\n" }, { "alpha_fraction": 0.7039999961853027, "alphanum_fraction": 0.7135999798774719, "avg_line_length": 21.321428298950195, "blob_id": "08b5be95183467c9220dda6fd764cfaff4b47695", "content_id": "57cccdc5c1ce56806d981467bcb266aefdb6fc98", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 625, "license_type": "no_license", "max_line_length": 52, "num_lines": 28, "path": "/Mail/sendTest.py", "repo_name": "ShahadIshraq/pyTests", 
"src_encoding": "UTF-8", "text": "import smtplib\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\n\ncount = 0\n\n\n# setting up the server\nfromaddr = \"shahad.nowhere@gmail.com\"\nprint(\"Trying to log in\")\nserver = smtplib.SMTP('smtp.gmail.com', 587)\nprint('.')\nserver.starttls()\nprint('.')\nserver.login(fromaddr, \"ihacgdyaoysdhnfh\")\nprint(\"Logged in\")\n\ntoaddr = \"1305022.si@ugrad.cse.buet.ac.bd\"\nmsg = MIMEMultipart()\nmsg['From'] = fromaddr\nmsg['To'] = \"shahad.nowhere@gmail.com\"\nmsg['Subject'] = \"Orientation class for the course \"\nbody = 'this is the body'\nmsg.attach(MIMEText(body, 'plain'))\ntext = msg.as_string()\nserver.sendmail(fromaddr, toaddr, text)\nserver.quit()\nprint(count)\n" }, { "alpha_fraction": 0.4859926998615265, "alphanum_fraction": 0.5054811239242554, "avg_line_length": 20.05128288269043, "blob_id": "45d60d905ebf8c3186ab814b03d6ad8f91e2371b", "content_id": "e3f67f3ca33719604528f3fef8905e7ac27143ba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 821, "license_type": "no_license", "max_line_length": 62, "num_lines": 39, "path": "/power consumption/scraping.py", "repo_name": "ShahadIshraq/pyTests", "src_encoding": "UTF-8", "text": "import os\n\nss = input(\"File path : \")\ns = ss.split(\".\")[0]\nif not os.path.exists(\"./\"+s+\"_output\"):\n os.makedirs(\"./\"+s+\"_output\")\nf = open(ss)\nthreshold = int(input(\"Threshold : \"))\nspan = int(input(\"Span : \"))\ny = []\n# t = 1e-3\nfor line in f:\n line = line.split()\n if len(line) > 0:\n number = float(line[0])\n y.append(number)\nprint(len(y))\nf.close()\n\ncount = 0\nstate = 0\nfileCount = 0\nout = \"\"\nn = 0\n\nwhile n < len(y):\n if y[n] >= threshold and n - 3 + span < len(y):\n out = open(\"./\"+s+\"_output/slice\"+str(fileCount), \"w\")\n fileCount = fileCount + 1\n tempSlice = y[n - 5:n+span-4]\n slices = []\n for x in tempSlice:\n slices.append(str(x)+'\\n')\n\n out.writelines(slices)\n out.close()\n n = n + span + 20\n else:\n n += 1\n" } ]
num_files: 11
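The power-consumption workflow above averages each slice with Simpson's rule. A self-contained sketch of that step follows; `average_power_mw`, `samples`, and `dt_ms` are illustrative names, the 5 V supply and milliwatt units follow the repo's README, and note that the repo's own expression `5.0*intg.simps(y, x)/len(y)*1.22` multiplies by the 1.22 ms step rather than dividing by it (Python evaluates left to right), which looks like a precedence slip, so this sketch divides by the full slice duration instead:

```python
import numpy as np
from scipy.integrate import simpson  # older SciPy spells this `simps`

def average_power_mw(samples, dt_ms=1.22, volts=5.0):
    """Integrate V*I over one slice, then divide by the slice duration."""
    y = np.asarray(samples, dtype=float)   # current samples in mA
    x = np.arange(len(y)) * dt_ms          # time axis in milliseconds
    return volts * simpson(y, x=x) / (len(y) * dt_ms)
```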
repo_name: ASU-CSE598-SC4ML/VinayakKothari_1219114741_HW3
repo_url: https://github.com/ASU-CSE598-SC4ML/VinayakKothari_1219114741_HW3
snapshot_id: 6a6334d877d338e103b63610640d4c07ef277f68
revision_id: b215f0483165f074d76a68f4842d75dda9464f98
directory_id: b6952341f0134ccf3d943e81efdc7fb598e2ff01
branch_name: refs/heads/main
visit_date: 2023-03-23T20:04:26.158231
revision_date: 2021-03-15T04:27:28
committer_date: 2021-03-15T04:27:28
github_id: 348,257,285
star_events_count: 1
fork_events_count: 0
gha_license_id: null
gha_created_at: 2021-03-16T07:46:45
gha_updated_at: 2021-03-16T07:46:25
gha_pushed_at: 2021-03-15T04:27:28
gha_language: null
[ { "alpha_fraction": 0.5556768774986267, "alphanum_fraction": 0.5764192342758179, "avg_line_length": 23.078947067260742, "blob_id": "4372b106140e07adab993de3206c33ddfa31d1eb", "content_id": "18b48fbecea0e6d3c83c6bca181b1c255f747baa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 916, "license_type": "no_license", "max_line_length": 55, "num_lines": 38, "path": "/Q2.py", "repo_name": "ASU-CSE598-SC4ML/VinayakKothari_1219114741_HW3", "src_encoding": "UTF-8", "text": "import crypten\nimport torch\nfrom crypten.mpc.primitives import BinarySharedTensor\n\nclass Bob:\n x = None\n m1 =4\n m2=5\n def __init__(self):\n self.x = torch.tensor(4)\n print(\"Tensor Threshold of Bob\"+ str(self.x))\n print(\"valye of Bob\"+ str(self.m1))\n print(\"valye of Bob\"+ str(self.m2))\n\nclass ALice:\n x = None\n def __init__(self):\n self.x = torch.tensor(6)\n print(\"Tensor valye of Alice\"+ str(self.x))\n\n def coupon_value(self , value):\n print(\"Value of Coupon returned\" + str(value))\nclass circuit:\n\n def compute(self):\n alice = ALice()\n bob = Bob()\n b1 = BinarySharedTensor(alice.x)\n b2 = BinarySharedTensor(bob.x)\n m1 =bob.m1\n m2 = bob.m2\n value = b1.gt(b2)\n value = value.get_plain_text()\n alice.coupon_value(value * m1 +(1-value) *m2)\n\ncrypten.init()\nc=circuit()\nc.compute()\n\n" }, { "alpha_fraction": 0.7149028182029724, "alphanum_fraction": 0.720302402973175, "avg_line_length": 24, "blob_id": "a8ed38ab5905718fb340495203349e73b028ac06", "content_id": "65732195f859a1bcced5cb0f2aa517dea640661e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 926, "license_type": "no_license", "max_line_length": 92, "num_lines": 37, "path": "/Q1.py", "repo_name": "ASU-CSE598-SC4ML/VinayakKothari_1219114741_HW3", "src_encoding": "UTF-8", "text": "import itertools\nimport logging\nimport unittest\nfrom CrypTen.test.multiprocess_test_case import MultiProcessTestCase, get_random_test_tensor\n\nimport crypten.communicator as comm\nimport crypten.mpc as mpc\nimport crypten\nimport torch\nfrom crypten.common.tensor_types import is_int_tensor\nfrom crypten.mpc.primitives import BinarySharedTensor, circuit\n\n\n@mpc.run_multiprocess(world_size=1)\ndef Yao_Millionaires():\n # Bob's Value\n x = torch.Tensor([10])\n\n # Alice's Value\n y = torch.Tensor([5])\n\n enc_x = BinarySharedTensor(x)\n enc_y = BinarySharedTensor(y)\n\n print(\"Tensor for BOB\" + str(enc_x._tensor))\n print(\"Tensor for Alice\" + str(enc_y._tensor))\n\n direct_value = x.gt(y).long()\n enctypted = getattr(enc_x, \"gt\")(enc_y)\n\n print(\"Actual comparison value\" + str(direct_value))\n print(\"Actual comparison value\" + str(enctypted))\n\n\ncrypten.init()\ntorch.set_num_threads(1)\nYao_Millionaires()\n\n" } ]
num_files: 2
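Q1.py and Q2.py above both reduce to one secret-shared comparison for Yao's millionaires problem. A condensed single-process sketch, using only calls that appear in those files (the values 10 and 5 are taken from Q1):

```python
import torch
import crypten
from crypten.mpc.primitives import BinarySharedTensor

crypten.init()
x = BinarySharedTensor(torch.tensor([10]))  # Bob's wealth, secret-shared
y = BinarySharedTensor(torch.tensor([5]))   # Alice's wealth, secret-shared
print(x.gt(y).get_plain_text())             # 1 => Bob is richer
```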
repo_name: samvongsay/Keras_Workspace
repo_url: https://github.com/samvongsay/Keras_Workspace
snapshot_id: 072122e60de75a036c665f5cdfe99ba5a28391be
revision_id: 289cfd337b32894efaeccf39dbde17f3d6e27376
directory_id: 74074deab80ec58afd65afc752a1af9bcf01d3b3
branch_name: refs/heads/master
visit_date: 2020-03-13T02:30:34.476859
revision_date: 2018-04-24T23:53:54
committer_date: 2018-04-24T23:53:54
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
[ { "alpha_fraction": 0.7868852615356445, "alphanum_fraction": 0.7868852615356445, "avg_line_length": 39.66666793823242, "blob_id": "e8a44e35a2d4aab33b07dbc5750cdb2e558b4ec3", "content_id": "31ab8672e19b1b50e4ac580185a9b072d08c665e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 122, "license_type": "no_license", "max_line_length": 102, "num_lines": 3, "path": "/README.md", "repo_name": "samvongsay/Keras_Workspace", "src_encoding": "UTF-8", "text": "# Keras_Workspace\n\nThis is a repository of work done on Keras in order to learn more of the library and machine learning.\n" }, { "alpha_fraction": 0.5270639657974243, "alphanum_fraction": 0.5456533432006836, "avg_line_length": 38.760868072509766, "blob_id": "38f1e9d2bd5132f32538645d627e7590ca246177", "content_id": "23e249714fd5e672132cb23ebaf56ee9580fb5e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1829, "license_type": "no_license", "max_line_length": 99, "num_lines": 46, "path": "/keras_example.py", "repo_name": "samvongsay/Keras_Workspace", "src_encoding": "UTF-8", "text": "from keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras import optimizers\nimport numpy\n\nnumpy.random.seed(7)\n\n#Load data\n#===========================================================================\ndataset = numpy.loadtxt(\"pima-indians-diabetes.csv\", delimiter=\",\")\nX = dataset[:,0:8]\nY = dataset[:,8]\n\n#Define model\n#Create CNN by adding one layer at a time, defining the number of nodes and its activation function\n# Exception: First layer should have input_dim be the number of inputs from the dataset.\n# Inputs is the number of features you are measuring. \n#===========================================================================\nmodel = Sequential() #Choose the type of model\nmodel.add(Dense(12, input_dim=8, activation='relu')) #12 nodes with 8 inputs\nmodel.add(Dense(8, activation='relu')) #8 nodes\nmodel.add(Dense(1, activation='sigmoid')) #Output\n\n#Compile\n#Define the loss function, optimizer\n#List of loss function and optimizer is available on the keras website\n#===========================================================================\n\n#Adam optimizer\n#-------------------------------------------\nmodel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) #77%-78% accuracy\n\n#SGD - Stochastic Gradient Descent\n#-------------------------------------------\n#sgd = optimizers.SGD(lr=0.01, clipnorm=1)\n#model.compile(loss='binary_crossentropy', optimizer='sgd', metrics=['accuracy']) #65% accuracy\n\n#Trains model\n#Define epochs and batch size\n#===========================================================================\nmodel.fit(X, Y, epochs=150, batch_size=10)\n\n#Evaluate model\n#===========================================================================\nscores = model.evaluate(X, Y)\nprint(\"\\n%s: %.2f%%\" % (model.metrics_names[1], scores[1]*100))\n" } ]
num_files: 2
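keras_example.py above walks the standard define → compile → fit → evaluate loop. The same skeleton compressed into a few lines, with random stand-in data in place of the Pima CSV (layer sizes and compile settings are the repo's; everything else is illustrative):

```python
import numpy as np
from keras.models import Sequential
from keras.layers import Dense

X = np.random.rand(100, 8)             # stand-in for the 8-feature CSV
Y = np.random.randint(0, 2, size=100)  # stand-in binary labels

model = Sequential([
    Dense(12, input_dim=8, activation='relu'),
    Dense(8, activation='relu'),
    Dense(1, activation='sigmoid'),
])
model.compile(loss='binary_crossentropy', optimizer='adam',
              metrics=['accuracy'])
model.fit(X, Y, epochs=5, batch_size=10, verbose=0)
print(model.evaluate(X, Y, verbose=0))  # [loss, accuracy]
```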
repo_name: mcaleste/2D-Knapsack-Dynamic-Programming
repo_url: https://github.com/mcaleste/2D-Knapsack-Dynamic-Programming
snapshot_id: 725e8b1aff76e544fdbc4ed52d4f94b0d7dd8127
revision_id: c6d3c2d52d694d435b57fb683ab92e9343141846
directory_id: 972f58aae224d1fdfe12be50f667dfb8ac1f96ab
branch_name: refs/heads/master
visit_date: 2017-12-05T18:09:44.093626
revision_date: 2017-01-29T02:04:59
committer_date: 2017-01-29T02:04:59
github_id: 80,269,841
star_events_count: 1
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
[ { "alpha_fraction": 0.5465263724327087, "alphanum_fraction": 0.5592038631439209, "avg_line_length": 36.92718505859375, "blob_id": "9574b3c6ecc747857e2b069568dae4fadb45f682", "content_id": "5a690666329420c65abd4f3c861323ce56b06d6c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7888, "license_type": "permissive", "max_line_length": 195, "num_lines": 206, "path": "/knapsack v4.py", "repo_name": "mcaleste/2D-Knapsack-Dynamic-Programming", "src_encoding": "UTF-8", "text": "\"\"\"\nCreated on Wed Jan 25 19:49:49 2017\n\nGiven a box with size (W,H) and a collection of N rectangles with sizes \n(w_i, h_i) and values v_i>0, returns total value of optimal configuration of \nnonoverlapping rectangles that can be fit in the box\n\n@author: mcaleste\n\"\"\"\n\nimport numpy as np\nimport copy\nfrom itertools import chain\n\ntestBox = np.matlib.zeros((20, 7))\nprint(testBox)\nrectangles = [[2,1,1], [2,2,3], [3,4,8], [7,8,12]] #[height, width, value]\n\n#given a rectangle and position, places rectangle in box at that position\ndef fillBox(box, shape, posY, posX, rectangleID):\n newBox = np.copy(box) \n if posY+shape[0]>np.shape(box)[0] or posX+shape[1]>np.shape(box)[1]:\n #print(\"doesn't fit\")\n return box\n for i in range(shape[0]):\n for j in range(shape[1]):\n try:\n if (newBox[posY+i, posX+j] != 0):\n #print(\"doesn't fit\") \n return box \n else:\n newBox[posY+i, posX+j] = rectangleID\n except:\n return newBox\n return newBox\n \n#splits box in two \ndef cutBox(box, index, dim):\n if dim == 0:\n return [box[:index,:], box[index:,:]]\n if dim == 1:\n return [box[:,:index], box[:,index:]]\n\n#recursive DP algorithm\ndef memoizedFitRectanglesCut(box, shapes):\n hashTable = dict()\n return memoizedFitRectanglesCutAux(box, shapes, hashTable)\n \n#helper function to memoizedFitRectanglesCut\n#recursively cuts box into two pieces, when a piece is the size of a rectangle, inserts rectangle\n#compares all possible ways of cutting box in two down to size of rectangles, computes total score\n#hashes solutions to boxes, so runs in polynomial time (H*W possible ways of cutting HxW box)\ndef memoizedFitRectanglesCutAux(box, shapes, hashTable):\n dims = (np.shape(box)[0], np.shape(box)[1])\n\n #check if box is in hashTable \n if dims in hashTable:\n return hashTable[dims]\n\n else:\n #initialize score to negative infinity\n score = -float('inf')\n #Find all ways to cut box in two\n possibleCuts = []\n for i in range(1,np.shape(box)[0]):\n possibleCuts.append(cutBox(box, i, 0))\n for j in range(1,np.shape(box)[1]):\n possibleCuts.append(cutBox(box, j, 1))\n\n #check if box is shape of rectangle\n for s in shapes:\n if np.shape(box)[0] == s[0]:\n if np.shape(box)[1] == s[1]:\n #if box is shape of rectangle, place rectangle in that spot \n score = s[2]\n hashTable[dims] = score\n return score\n \n if len(possibleCuts) == 0:\n score = 0\n \n #compares all possible ways of recursively cutting box in two\n for b in range(len(possibleCuts)):\n score = max(score, memoizedFitRectanglesCutAux(possibleCuts[b][0], shapes, hashTable) + memoizedFitRectanglesCutAux(possibleCuts[b][1], shapes, hashTable))\n hashTable[dims] = score\n \n return score\n\n \nif __name__ == '__main__':\n print(memoizedFitRectanglesCut(testBox, rectangles))\n #print(memoizedFitRectangles(testBox, testShapes))\n\n\n#Below is the code for algorithms I tried before this 
one\n#########################################################\n#########################################################\n\n#returns list of disjoint open regions in box\n#idea is to break apart open regions for easier hashing\ndef subProbs(box):\n subProblems = []\n for i in range(np.shape(box)[0]):\n for j in range(np.shape(box)[1]):\n if box[i,j] == 0:\n #check if already added\n if not (box[i,j], i, j) in chain.from_iterable(subProblems): \n neighbors = getNeighbors(box, i, j)\n #add first zero to group \n group = [(box[i,j], i, j)]\n #traversing tree of neighbors, adding to group \n while len(neighbors)>0:\n newNeighbor = neighbors.pop()\n #check if duplicate and if open space\n if newNeighbor not in group and newNeighbor[0] == 0:\n group.append(newNeighbor)\n #get neighbors of this neighbor, add to stack \n additions = getNeighbors(box, newNeighbor[1], newNeighbor[2])\n for a in additions:\n if a not in group:\n neighbors.append(a)\n subProblems.append(group)\n return subProblems\n\n#returns row, column, and value of valid neighbors given index and box\ndef getNeighbors(box, row, column):\n neighbors = [(row-1, column), (row, column-1), (row+1, column), (row, column+1)]\n neighborGroup = [(box[a,b], a, b) for (a,b) in neighbors if 0<=a<np.shape(box)[0] and 0<=b<np.shape(box)[1]]\n return neighborGroup\n\n#takes in subProblem, and normalizes for comparison\ndef normalizeSubProblem(subProblem):\n global countBB \n countBB += 1\n minRow = minColumn = float('inf')\n maxRow = maxColumn = 0\n for i in subProblem:\n if i[1] > maxRow:\n maxRow = i[1]\n if i[1] < minRow:\n minRow = i[1]\n if i[2] > maxColumn:\n maxColumn = i[2]\n if i[2] < minColumn:\n minColumn = i[2]\n columns = maxColumn - minColumn + 1\n rows = maxRow - minRow +1\n box = np.full((rows, columns),1)\n for i in subProblem:\n box[i[1]-minRow,i[2]-minColumn] = 0\n return box\n\n#returns possible moves of rectangles given a box\ndef fit(box, shapes):\n global shapesCount\n shapesThatFit = []\n #iterate through shapes and try to insert each shape into each position\n #if it can be inserted, add shape and position to shapesThatFit \n for a in shapes:\n for i in range(np.shape(box)[0]):\n for j in range(np.shape(box)[1]):\n if box[i,j]==0:\n if not np.array_equal(fillBox(box, a, i, j), box):\n shapesThatFit.append((a, i, j))\n shapesCount += len(shapesThatFit)\n return shapesThatFit\n\ndef memoizedFitRectangles(box, shapes):\n table = dict()\n return memoizedFitRectanglesAux(box, shapes, table)\n \n#first try at dynamic programming solution\n#idea is to look at each disjoint open space in box and consider those individually\ndef memoizedFitRectanglesAux(box, shapes, table):\n subProblems = [normalizeSubProblem(i) for i in subProbs(box)]\n total = 0\n \n for i in subProblems:\n #check if in hashtable\n a = copy.deepcopy(i)\n a.flags.writeable = False\n\n if a.data in table:\n print(table[a.data])\n return table[a.data]\n \n if not 0 in i:\n return 0\n\n else:\n #possible moves is all ways to place rectangles in box\n #iterate over possible moves and recursively evaluate remaining box\n totalValue = -float('inf')\n possibleMoves = fit(i, shapes)\n \n if len(possibleMoves) == 0:\n totalValue = 0\n #search through all possible rectangle moves and recurse on subproblem\n for b in range(len(possibleMoves)):\n totalValue = max(totalValue, possibleMoves[b][0][3] + memoizedFitRectanglesAux(fillBox(i, possibleMoves[b][0], int(possibleMoves[b][1]), int(possibleMoves[b][2])), shapes, table))\n \n #update hash table\n table[a.data] = 
totalValue\n total += totalValue\n \n return total\n\n \n\n \n \n\n \n \n \n \n \n" }, { "alpha_fraction": 0.5229649543762207, "alphanum_fraction": 0.5372138619422913, "avg_line_length": 32.270408630371094, "blob_id": "502a59adb144897781c8c41438bc874b2485195e", "content_id": "a2c54cdd8eeb516aba02468617df067ed59ef864", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6597, "license_type": "permissive", "max_line_length": 173, "num_lines": 196, "path": "/knapsack v3.py", "repo_name": "mcaleste/2D-Knapsack-Dynamic-Programming", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jan 25 19:49:49 2017\n\n@author: mcaleste\n\"\"\"\n\nimport numpy as np\nimport numpy.matlib\nfrom itertools import chain\nimport itertools\nimport copy\n\n#given a rectangle and position, places rectangle in box at that position\ndef fillBox(box, shape, posY, posX):\n newBox = np.copy(box) \n if posY+shape[1]>np.shape(box)[0] or posX+shape[2]>np.shape(box)[1]:\n #print(\"doesn't fit\")\n return box\n for i in range(shape[1]):\n for j in range(shape[2]):\n try:\n if (newBox[posY+i, posX+j] != 0):\n #print(\"doesn't fit\") \n return box \n else:\n newBox[posY+i, posX+j] = shape[0]\n except:\n return newBox\n return newBox\n \ntestBox = np.matlib.zeros((4, 4))\n#testBox[1,:] = 1\nprint(testBox)\ntestShapes = [[1,3,2,1], [2,3,5,3]]\n\n#returns possible moves of rectangles given a box\ndef fit(box, shapes):\n global shapesCount\n shapesThatFit = []\n #iterate through shapes and try to insert each shape into each position\n #if it can be inserted, add shape and position to shapesThatFit \n for a in shapes:\n for i in range(np.shape(box)[0]):\n for j in range(np.shape(box)[1]):\n if box[i,j]==0:\n if not np.array_equal(fillBox(box, a, i, j), box):\n shapesThatFit.append((a, i, j))\n shapesCount += len(shapesThatFit)\n return shapesThatFit\n\n#returns list of disjoint open regions in box\ndef subProbs(box):\n subProblems = []\n for i in range(np.shape(box)[0]):\n for j in range(np.shape(box)[1]):\n if box[i,j] == 0:\n #check if already added\n if not (box[i,j], i, j) in chain.from_iterable(subProblems): \n neighbors = getNeighbors(box, i, j)\n #add first zero to group \n group = [(box[i,j], i, j)]\n #traversing tree of neighbors, adding to group \n while len(neighbors)>0:\n newNeighbor = neighbors.pop()\n #check if duplicate and if open space\n if newNeighbor not in group and newNeighbor[0] == 0:\n group.append(newNeighbor)\n #get neighbors of this neighbor, add to stack \n additions = getNeighbors(box, newNeighbor[1], newNeighbor[2])\n for a in additions:\n if a not in group:\n neighbors.append(a)\n subProblems.append(group)\n return subProblems\n\ndef getNeighbors(box, row, column):\n neighbors = [(row-1, column), (row, column-1), (row+1, column), (row, column+1)]\n neighborGroup = [(box[a,b], a, b) for (a,b) in neighbors if 0<=a<np.shape(box)[0] and 0<=b<np.shape(box)[1]]\n return neighborGroup\n\n#takes in subProblem, and normalizes for comparison\ndef normalizeSubProblem(subProblem):\n global countBB \n countBB += 1\n minRow = minColumn = float('inf')\n maxRow = maxColumn = 0\n for i in subProblem:\n if i[1] > maxRow:\n maxRow = i[1]\n if i[1] < minRow:\n minRow = i[1]\n if i[2] > maxColumn:\n maxColumn = i[2]\n if i[2] < minColumn:\n minColumn = i[2]\n columns = maxColumn - minColumn + 1\n rows = maxRow - minRow +1\n box = np.full((rows, columns),1)\n for i in subProblem:\n box[i[1]-minRow,i[2]-minColumn] = 0\n return 
box\n\ndef memoizedFitRectangles(box, shapes):\n r = dict()\n print(r)\n return memoizedFitRectanglesAux(box, shapes, r)\n \ndef memoizedFitRectanglesAux(box, shapes, r):\n memoizedTime = time.time()\n subProblems = [normalizeSubProblem(i) for i in subProbs(box)]\n total = 0\n for i in subProblems:\n print(i)\n a = copy.deepcopy(i)\n a.flags.writeable = False\n if a.data in r:\n print(r[a.data])\n return r[a.data]\n if not 0 in i:\n return 0\n else:\n q = -float('inf')\n possibleMoves = fit(i, shapes)\n if len(possibleMoves) == 0:\n q = 0\n for b in range(len(possibleMoves)):\n q = max(q, possibleMoves[b][0][3] + memoizedFitRectanglesAux(fillBox(i, possibleMoves[b][0], int(possibleMoves[b][1]), int(possibleMoves[b][2])), shapes, r))\n r[a.data] = q\n total += q\n endMemoizedTime = time.time()\n print(endMemoizedTime - memoizedTime)\n return total\n\n\n#given a box, returns all possible cuts\ndef cuts(box):\n possibleCuts = []\n for i in range(np.shape(box)[0]):\n possibleCuts.append(copy.deepcopy(box[i,:]))\n print(possibleCuts)\n for j in range(np.shape(box)[1]):\n possibleCuts.append(copy.deepcopy(box[:,j]))\n return possibleCuts\n\ndef cutBox(box, index, dim):\n return np.split(box,index,axis=dim)\n \nprint(cutBox(testBox, 1, 1))\n\n\n \n\ndef memoizedFitRectanglesCut(box, shapes):\n r = dict()\n print(r)\n return memoizedFitRectanglesCutAux(box, shapes, r)\n \ndef memoizedFitRectanglesCutAux(box, shapes, r):\n memoizedTime = time.time()\n subProblems = [normalizeSubProblem(i) for i in subProbs(box)]\n total = 0\n for i in subProblems:\n print(i)\n a = copy.deepcopy(i)\n a.flags.writeable = False\n if a.data in r:\n print(r[a.data])\n return r[a.data]\n if not 0 in i:\n return 0\n else:\n q = -float('inf')\n possibleMoves = fit(i, shapes)\n if len(possibleMoves) == 0:\n q = 0\n for b in range(len(possibleMoves)):\n q = max(q, possibleMoves[b][0][3] + memoizedFitRectanglesAux(fillBox(i, possibleMoves[b][0], int(possibleMoves[b][1]), int(possibleMoves[b][2])), shapes, r))\n r[a.data] = q\n total += q\n endMemoizedTime = time.time()\n print(endMemoizedTime - memoizedTime)\n return total\n\n \nif __name__ == '__main__':\n import time\n shapesCount = 0\n countBB = 0 \n startTime = time.time()\n #print(memoizedFitRectangles(testBox, testShapes))\n endTime = time.time()\n #print(endTime - startTime)\n #print(shapesCount)\n #print(countBB)\n #print(cuts(testBox))\n\n\n \n\n \n \n\n \n \n \n \n \n" }, { "alpha_fraction": 0.7941176295280457, "alphanum_fraction": 0.8235294222831726, "avg_line_length": 33, "blob_id": "73d4ab4adff30a954afcb292efb1e03b0c9bb5bd", "content_id": "a876cc3421beac5f675d75fd4dee3ee9c9ec6a8f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 34, "license_type": "permissive", "max_line_length": 33, "num_lines": 1, "path": "/README.md", "repo_name": "mcaleste/2D-Knapsack-Dynamic-Programming", "src_encoding": "UTF-8", "text": "# 2D-Knapsack-Dynamic-Programming\n" } ]
num_files: 3
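knapsack v4.py above memoizes a guillotine-cut recurrence on box dimensions. A self-contained sketch of that recurrence under the same assumptions (rectangles are `[h, w, value]`; the 20×7 box and rectangle list are the repo's test data); it differs slightly in that a box exactly matching a rectangle may still be cut further, which can only raise the score:

```python
from functools import lru_cache

def best_value(H, W, rects):
    """Max-value guillotine packing of an H x W box, mirroring the
    cut-in-two recurrence of knapsack v4.py."""
    rects = tuple(tuple(r) for r in rects)

    @lru_cache(maxsize=None)
    def solve(h, w):
        # option 1: the box is exactly one rectangle
        best = max((v for rh, rw, v in rects if (rh, rw) == (h, w)),
                   default=0)
        # option 2: cut horizontally or vertically, solve both halves
        for i in range(1, h):
            best = max(best, solve(i, w) + solve(h - i, w))
        for j in range(1, w):
            best = max(best, solve(h, j) + solve(h, w - j))
        return best

    return solve(H, W)

print(best_value(20, 7, [(2, 1, 1), (2, 2, 3), (3, 4, 8), (7, 8, 12)]))
```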
repo_name: brandontrabucco/human_matching_experiment
repo_url: https://github.com/brandontrabucco/human_matching_experiment
snapshot_id: 1d2a5d8612fd1b1ded7b955f792bf8bccaa4f1f0
revision_id: dcdbc2ccd0c85b589c41718b21bc9e37a473dce9
directory_id: e64de602344d3e45388d51fdb0082cc2f0f55f08
branch_name: refs/heads/master
visit_date: 2020-03-23T14:16:47.657411
revision_date: 2018-07-20T13:56:50
committer_date: 2018-07-20T13:56:50
github_id: 141,666,674
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
[ { "alpha_fraction": 0.8275862336158752, "alphanum_fraction": 0.8275862336158752, "avg_line_length": 57, "blob_id": "656d59c7d2443db54e17da9aa1ae767b8dd1ba5e", "content_id": "0995c72e6c3efae82fc071d936a7ae77adc2fa54", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 116, "license_type": "permissive", "max_line_length": 87, "num_lines": 2, "path": "/README.md", "repo_name": "brandontrabucco/human_matching_experiment", "src_encoding": "UTF-8", "text": "# human_matching_experiment\nCompute the precision and recall of human subjects matching sets of image and captions.\n" }, { "alpha_fraction": 0.5310675501823425, "alphanum_fraction": 0.5423269271850586, "avg_line_length": 30.346405029296875, "blob_id": "88b7cd6bd54a8cd270c507d9c6fcd9c0b7d64069", "content_id": "25f411321ec7216e3ac1132941a165e4596b1c33", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4796, "license_type": "permissive", "max_line_length": 87, "num_lines": 153, "path": "/experiment.py", "repo_name": "brandontrabucco/human_matching_experiment", "src_encoding": "UTF-8", "text": "\"\"\"Author: Brandon Trabucco.\nAn experiment where humans match captions with attributes.\nArgs:\n batches: Number of shuffled batches to annotate.\n examples: Number of examples per batch.\n\"\"\"\n\n\nimport numpy as np\nimport random\nimport json\nimport argparse\nfrom matplotlib.pyplot import title, figure, imshow, axis, text, close\nfrom matplotlib.image import imread\nfrom collections import namedtuple\n\n\nrandom.seed(1234567)\nnp.random.seed(1234567)\nStruct = namedtuple(\"Struct\", \n [\"data\", \"label\"])\n\n\nif __name__ == \"__main__\":\n \"\"\"Program entry point, load the files.\n \"\"\"\n\n parser = argparse.ArgumentParser(\"Matching experiment.\")\n parser.add_argument(\"-b\", \"--batches\", \n type=int, default=1, help=\"Number of shuffled batches to annotate.\")\n parser.add_argument(\"-e\", \"--examples\", \n type=int, default=3, help=\"Number of examples per batch.\")\n args = parser.parse_args()\n\n with open(\"captions.json\", \"r\") as f:\n data = json.load(f)\n\n answers = []\n\n # Loop and create batches.\n for b in range(args.batches):\n\n # Random indices into the dataset.\n indices = np.random.choice(\n len(data), args.examples, replace=False)\n\n # Separate the image and captions with ids.\n images, captions = [], []\n for i in indices:\n c = data[i]\n images.append(Struct(\n data=c[\"image_name\"], label=i))\n captions.append(Struct(\n data=c[\"captions\"][0][0], label=i))\n\n # Randomly shuffle each list.\n random.shuffle(images)\n random.shuffle(captions)\n\n # Construct a graphical quiz\n fig = figure()\n title(\"Image Batch {0} of {1}\".format(b, args.batches))\n axis('off')\n for i, img in enumerate(images):\n\n # Display images.\n fig.add_subplot(2, args.examples, 1 + i)\n image = imread(img.data)\n imshow(image)\n axis('off')\n\n # Display text.\n fig.add_subplot(2, args.examples, \n 1 + i + args.examples)\n text(0.5, 1.0, \"Image {0:3d}\".format(img.label), \n horizontalalignment=\"center\")\n axis('off')\n\n # Build the captions selection.\n question = \"Captions were:\"\n for i, cap in enumerate(captions):\n question += \"\\n ({0:3d}) {1}\".format(i, cap.data)\n\n # Ask for user input.\n fig.show()\n print(question)\n\n # Collect the answers.\n for img in images:\n while True:\n \n # Read inputs separated by spaces\n iis = input(\"Image {0:3d} matches captions... 
\".format(\n img.label)).strip().split(\" \")\n \n # User has entered the empty string\n if len(iis) == 1 and iis[0] == \"\":\n answers.append([img, []])\n break\n \n try:\n # Try to decode integers\n jjs = list(set([int(i) for i in iis]))\n if all([j >= 0 and j < args.examples for j in jjs]):\n answers.append([img, [captions[j] for j in jjs]])\n break\n print(\"Inputs {0} not in range(0, {1})\".format(jjs, args.examples))\n except:\n print(\"Inputs {0} not numbers\".format(iis))\n\n # Close the plot window.\n close()\n\n # Keep track of these.\n false_positives = 0\n false_negatives = 0\n true_positives = 0\n true_negatives = 0\n all_labels = set(range(args.examples))\n\n # Calculate how many answers are correct.\n for img, caps in answers:\n has_correct_label = False\n present_labels = set()\n\n for cap in caps:\n present_labels.add(cap.label)\n\n if cap.label == img.label:\n has_correct_label = True\n true_positives += 1\n\n if cap.label != img.label:\n false_positives += 1\n\n if not has_correct_label:\n false_negatives += 1\n\n for z in all_labels.difference(present_labels):\n if z != img.label:\n true_negatives += 1\n\n # Export the collected statistics.\n precision = true_positives / (true_positives + false_positives)\n recall = true_positives / (true_positives + false_negatives)\n print(\"Precision is {0:.3f} amnd recall is {1:.3f}\".format(precision, recall))\n\n # Write a data file.\n with open(\"statistics.json\", \"w\") as f:\n json.dump({\n \"precision\": precision, \"recall\": recall,\n \"true_positives\": true_positives, \"true_negatives\": true_negatives,\n \"false_positives\": false_positives, \"false_negatives\": false_negatives}, f)\n" }, { "alpha_fraction": 0.75, "alphanum_fraction": 0.75, "avg_line_length": 15, "blob_id": "866dd2184fe9a64f9223c1a98fc38c8e905d6e06", "content_id": "798b017dfd043b0c09d1a4ab6c3fce41c83767c8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 144, "license_type": "permissive", "max_line_length": 67, "num_lines": 9, "path": "/data/README.md", "repo_name": "brandontrabucco/human_matching_experiment", "src_encoding": "UTF-8", "text": "We expect this folder to contain the following directory structure.\n\ncoco/*\ndeep fashion/*\nother/*\nvisual-genome/*\nvoc/*\nweb crawling/*\nwider/*\n" } ]
num_files: 3
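experiment.py above tallies true/false positives and negatives per matched image, then applies the standard precision/recall formulas. A tiny self-contained check of those formulas (`prf` is an illustrative name; the counts in the assert are hypothetical):

```python
def prf(tp, fp, fn):
    precision = tp / (tp + fp) if tp + fp else 0.0
    recall = tp / (tp + fn) if tp + fn else 0.0
    return precision, recall

# hypothetical tallies: 3 correct matches kept, 1 wrong caption kept,
# 1 image whose correct caption was missed
assert prf(3, 1, 1) == (0.75, 0.75)
```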
repo_name: csmagic/pucsd-iop
repo_url: https://github.com/csmagic/pucsd-iop
snapshot_id: 9d46d891f3967a43b7644272a4d4c8d28f701b08
revision_id: bc7790e71ff2ff6e78b63eef0199c01ec1a26d3f
directory_id: 08fd31a485325cb3b7cc51f53345f8935d8f55c8
branch_name: refs/heads/master
visit_date: 2020-12-31T00:55:21.050169
revision_date: 2017-05-21T13:37:16
committer_date: 2017-05-21T13:37:16
github_id: 80,598,870
star_events_count: 0
fork_events_count: 3
gha_license_id: null
gha_created_at: 2017-02-01T07:38:49
gha_updated_at: 2017-02-04T13:24:09
gha_pushed_at: 2017-03-05T22:42:05
gha_language: Haskell
[ { "alpha_fraction": 0.7194244861602783, "alphanum_fraction": 0.7841726541519165, "avg_line_length": 33.5, "blob_id": "3943adb267948e63ce45fc626cdd40722f7ebdba", "content_id": "e63d80223a6f4d10de5ab25115114bb1291aa48c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 139, "license_type": "no_license", "max_line_length": 115, "num_lines": 4, "path": "/14125/README.md", "repo_name": "csmagic/pucsd-iop", "src_encoding": "UTF-8", "text": "# iop\n# AMAN JAISWAL 14125\n\nThis Directory is created for the assignments of IOP-2017 Subjects in Pune University, Computer Science Department.\n\n" }, { "alpha_fraction": 0.649367094039917, "alphanum_fraction": 0.6531645655632019, "avg_line_length": 16.130434036254883, "blob_id": "3275c88cb476e74ed1f1f7fc7523aadde58fce14", "content_id": "4aed12f3447f80dfd405373c069a50eba807df9e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 790, "license_type": "no_license", "max_line_length": 78, "num_lines": 46, "path": "/laban-experiments/src/legacy/render_nobg/trun.py", "repo_name": "csmagic/pucsd-iop", "src_encoding": "UTF-8", "text": "import subprocess\nimport os\n# runs led to create lbn file\np = subprocess.Popen(\"./led\")\np.wait()\n# runs lintel to create .n file\np = subprocess.Popen(\"./lintelm\")\np.wait()\n\n\nuserinput = raw_input(\"Enter the name of .n file created Eg(test.lbn_000.n)\" )\nf = open(userinput,\"r\")\nlines = f.readlines()\nf.close()\nf = open(userinput,\"w\")\nfor line in lines:\n\tif 'floor' not in line:\n\t\tf.write(line)\n\t\nf.close()\n\nf = open(userinput,\"r\")\nlines = f.readlines()\nf.close()\nf = open(userinput,\"w\")\nfor line in lines:\n\tif 'bdx' not in line:\n\t\tf.write(line)\n\t\nf.close()\n\n\nf = open(userinput,\"r\")\nlines = f.readlines()\nf.close()\nf = open(userinput,\"w\")\nfor line in lines:\n\tif 'brdz' not in line:\n\t\tf.write(line)\n\t\nf.close()\n\n\nprint(\"Enter the name of .n file\")\np = subprocess.Popen(\"./lintel\")\np.wait()\t\n\n" }, { "alpha_fraction": 0.6381909251213074, "alphanum_fraction": 0.6557788848876953, "avg_line_length": 21.05555534362793, "blob_id": "03c64b6febf3fabb9dab0f7abd7680bfb8b5f1d2", "content_id": "8e0c70d416ce0a281a880082b8bf4d61e8848e61", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 398, "license_type": "no_license", "max_line_length": 68, "num_lines": 18, "path": "/laban-experiments/src/legacy/render_nobg/Makefile", "repo_name": "csmagic/pucsd-iop", "src_encoding": "UTF-8", "text": "CFLAGS = -g\nCC = gcc\n#CXX = i686-w64-mingw32-g++\nCXX = g++\nexecutables = led lintel lintelm\n\nall: ${executables}\n\nled: led.cpp\n\t$(CXX) -I /usr/include/GL led.cpp -o led -lm -lglut -lGL\n\nlintel: lintel.cpp\n\t$(CXX) -g -I /usr/include/GL lintel.cpp -o lintel -lm -lglut -lGL\n\nlintel: lintel.cpp\n\t$(CXX) -g -I /usr/include/GL lintelm.cpp -o lintelm -lm -lglut -lGL\nclean:\n\trm -rf $(executables) *.tmp \n" }, { "alpha_fraction": 0.61654132604599, "alphanum_fraction": 0.6428571343421936, "avg_line_length": 19.384614944458008, "blob_id": "0703a2ed9a9007d26f62511f4fd8fff8f53d6058", "content_id": "81ef911afd946b104231772011604956bba5ce21", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 266, "license_type": "no_license", "max_line_length": 66, "num_lines": 13, "path": "/laban-experiments/src/render_withbg/Makefile", "repo_name": "csmagic/pucsd-iop", 
"src_encoding": "UTF-8", "text": "CFLAGS = -g\nCC = gcc\n#CXX = i686-w64-mingw32-g++\nCXX = g++\nexecutables = led lintel \n\nall: ${executables}\n\nled: led.cpp\n\t$(CXX) -I /usr/include/GL led.cpp -o led -lm -lglut -lGL\n\nlintel: lintel.cpp\n\t$(CXX) -g -I /usr/include/GL lintel.cpp -o lintel -lm -lglut -lGL\n\n" }, { "alpha_fraction": 0.45250263810157776, "alphanum_fraction": 0.4987950325012207, "avg_line_length": 28.778589248657227, "blob_id": "63145b9603d0262b5041dc5ee0df4236e72ea152", "content_id": "bc920cd8ee12acc3c6c6d7eaab05ef574eda6a46", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 151040, "license_type": "no_license", "max_line_length": 107, "num_lines": 5072, "path": "/laban-experiments/src/legacy/render_nobg/lintelm.cpp", "repo_name": "csmagic/pucsd-iop", "src_encoding": "UTF-8", "text": "/* lintel version 84\n \n This set of routines reads a specifications of actions \n to be performed in a LED .lbn file in Labanotation, \n or a NUDES .n file, and performs them\n producing an interactive 3D animated display.\n\n If there are two or more staves in labanotation,\n it assumes man on the left, woman on the right\n\n written in C++ for \n Microsoft Visual Studio .NET 2003, Opengl, and Glut32.\n\n \t\tand optionally\tlintel.ini\n\n 7 Nov 2008 lintel084- added 'limit' to declaration comments\n 27 Nov 2007 lintel084- oriented, laction, lsethold, ldochest\n 14 Nov 2007 lintel083- chest twist: laction, ldolimb\n 14 Nov 2007 lintel082- lbn_figures: option of 1 stave\n 5 May 2007 lintel081- angsep, dotouch, surf,\n 3 Apr 2007 lintel080- fix middle relaxed arms, donum\n 6 Dec 2006 lintel079- ptitle, reading lintel.ini\n 4 Dec 2006 lintel078- lintel066 + lintel077, lsetcoords\n 16 Sep 2006 lintel077- dotouch seeks min sqr(surf())\n 19 Aug 2006 lintel076- toe taps use dodrag\n 14 Aug 2006 lintel075- binary search for dodrag\n 13 Aug 2006 lintel074- forbid, allow\n 8 Aug 2006 lintel073- cutting\n 21 Jun 2006 lintel069- lleggesture\n 1 Jun 2006 lintel068- input frame rate, beats per minute \n bpm,fps,ppb,initialise,lgetfiles,\n doub60,fac,FACTOR\n 15 May 2006 lintel066- default .lbn,\n staves 1 2, track, spot turn\n 12 May 2006 lintel065- allow non-alphanumerics in names\n 9 May 2006 lintel064- fix arm angles arlx arlx\n 8 May 2006 lintel063- fix lbows\n 7 May 2006 lintel062- fix lselectfig\n 2 May 2006 lintel061- fix semishadow hold,\n 21 Apr 2006 lintel060- accommodating chest directions\n 19 Apr 2006 lintel059- fixing feet angles\n 18 Apr 2006 lintel058- adding ldoposn, ldokeep\n 15 Apr 2006 lintel057- using only lintel.n\n 14 Mar 2006 lintel056- fix lleggesture,\n 12 Mar 2006 lintel055- fix action (ground: t18 frac -> prop)\n 19 Jan 2006 lintel054- fix globals, dodrag, doground, elow,\n\t inperf, help, checkeys, image,\n 14 Jan 2006 lintel054- fix lleggesture, ldostep,\n 11 Jan 2006 lintel053- fix lselectfig, lgetfiles, lleggesture\n 7 Jan 2006 lintel052- fix lspotturn, laction, lsethold, hold constants, face indicators\n 5 Jan 2006 lintel051- cut forward, back, settle, lside, rename lleggesture\n 26 Dec 2005 lintel050- fix lforward, ssend, ssmall, radten\n 23 Dec 2005 lintel049- fix loverlap, ldopivot, lbent, ldoarms, remove lhaspivot, diagnostics\n 21 Dec 2005 lintel048- fix laction, lback, ldohold, fclh\n 20 Dec 2005 lintel047- fix back step, diagonal aerials\n 5 Dec 2005 lintel046- fix back step\n 4 Dec 2005 lintel045- fix overlapping facing symbols\n 4 Dec 2005 lintel044- fix kept,keptf\n 16 Nov 2005 lintel043- fix closed 
position\n 11 Nov 2005 lintel042- fix promenade position, bent arms\n 8 Nov 2005 lintel041- fix rotary chasses\n 8 Nov 2005 lintel040- clean up\n 5 Nov 2005 lintel039- fix spot turns in Charmaine\n 1 Nov 2005 lintel038- fix closed and shadow holds\n 1 Nov 2005 lintel037- fix associating pins with rotation signs\n 31 Oct 2005 lintel036- include shadow hold, fix back step\n 3 Oct 2005 lintel035- fix closed and semishadow holds\n 3 Oct 2005 lintel034- fix starting positions man and woman\n 3 Oct 2005 lintel033- syncronise man and woman\n 30 Sep 2005 lintel032- request new file in event of an error\n 28 Sep 2005 lintel031- fix subroutine counting bug\n 27 Sep 2005 lintel030- fix counting bugs (off by 1)\n 26 Sep 2005 lintel029- rewrote setels\n 21 Sep 2005 lintel027- able to run .n files also\n 17 Sep 2005 lintel026- drag limited to high ankle position\n 9 Sep 2005 lintel025- use Lintel.figs.n and Lintel.subs.n\n 13 Aug 2005 lintel024- import ballroom hold from rev18.n \n 8 Aug 2005 lintel023- set holds\n 7 Aug 2005 lintel022- set starting spacing\n 1 Aug 2005 lintel021- set starting positions\n 31 Jul 2005 lintel020- animate both man and woman NBG\n 29 Jul 2005 lintel019- move calls to setman/setwoman\n 6 Jul 2005 lintel018- fix undefined variables t,k,fname\n 5 Jul 2005 lintel017- fix leg-gesture/turn combination\n 4 Jul 2005 lintel016- tidy up \n 2 Jul 2005 lintel015- fix Charmaine spot turn\n 1 Jul 2005 lintel014- NBG\n 30 Jun 2005 lintel013- fix Boston 2 Step women end facing snag\n 29 Jun 2005 lintel012- fix La Bomba arms snag\n 26 Jun 2005 lintel011- accommodating XX (bent) sign\n 15 Jun 2005 lintel010- fix 'touch' problem in 'angsep'\n 14 Jun 2005 lintel009- add data file name to window title\n 13 Jun 2005 lintel008- use Linter.decs.n and Linter.subs.n\n 8 Jun 2005 lintel007- still fixing drag, luci.lbn, bar 25, fr 899\n 7 Jun 2005 lintel006- use 'drag' for toe/foot/heel taps, slowed pivots\n 6 Jun 2005 lintel005- fix Fred's 'drag' problem in luci.lbn\n 5 Jun 2005 lintel004- allow comment lines in .lbn file\n 4 Jun 2005 lintel003- display bar numbers\n 1 Jun 2005 lintel002- use only linter.subs.n and linter.decs.n\n 5 May 2005 lintel001- check number of staves\n 4 May 2005 drawel45- add linter50 for left/right staves\n 3 May 2005 linter - convert to C++\n 2 May 2005 drawel43- delete input summary printout\n 26 Apr 2005 drawel42- add compl\n 26 Apr 2005 drawel41- add shadows\n 25 Apr 2005 complu - fix view command\n 25 Apr 2005 complt - fix final summary\n 24 Apr 2005 drawel40- correct shading\n 24 Apr 2005 drawel39- compress z axis\n 23 Apr 2005 drawel38- fix place/observe error\n 20 Apr 2005 compls - change nels to ne\n 20 Apr 2005 drawel35- write errors to standard output\n 8 Apr 2005 drawel35- debug view transform\n 7 Apr 2005 drawel34- incorporate movement simulation (prfrm)\n 5 Apr 2005 drawel33- incorporate view transform (view)\n 4 Apr 2005 drawel32- scale about screen centre\n 3 Apr 2005 drawel31- input root of datafile name\n 28 Mar 2005 drawel30- improve vertical illumination\n 28 Mar 2005 drawel29- improve file name input \n 27 Mar 2005 drawel28- file name input\n 27 Mar 2005 drawel27- echo commands\n 27 Mar 2005 drawel26- slow pause, added 'v','w'\n 26 Mar 2005 drawel25- fix shading\n 26 Mar 2005 drawel24- fix visibility of frame numbers\n 22 Mar 2005 drawel23- fix x and y rotations\n 22 Mar 2005 drawel - fix z rotation and help\n 22 Mar 2005 drawel - show frame numbers\n 22 Mar 2005 drawel - fix inter-frame delay\n 22 Mar 2005 drawel - fix pausing on first and last 
frames\n 22 Mar 2005 drawel - add single frame forward and back\n 21 Mar 2005 drawel - add pause routine\n 19 Mar 2005 drawel - separate setels routine.\n 13 Mar 2005 drawel - translate to C++ for .NET\n 3 Apr 2005 complr - list statistics at end\n 1 Apr 2005 complq - read nudes filename root\n 29 Mar 2005 complp - rewritten in C++ for .NET 2003\n 3 Jul 2004 linter - restore upper case figure name initials\n 11 Oct 2003 linter - ensure fend > fstart\n 10 Oct 2003 linter - align output spacing\n 7 Oct 2003 linter - use 'woman', 'man' variables\n 12 Sep 2003 prfrm - error message contains frame number\n 10 Sep 2003 prfrm - touch using nested search\n 7 Sep 2003 compl - avoid expanding the 0-1 frame period with -e option\n 24 Jul 2003 prfrm - extend drag search angle from 10 to 20 degrees\n 24 Jul 2003 linter - use more drag on closing ankle\n 10 Jul 2003 linter - use drag on closing ankle\n 16 Jun 2003 prfrm - improve drag\n 6 Jun 2003 prfrm - debug drag\n 6 Jun 2003 linter - accommodate close sign for standing foot\n 29 May 2003 linter - fixing leg compression\n 1 Feb 2003 linter - optional 1 second pause at start and end\n 29 Jan 2003 linter - use 'started' to fix initial orientation\n 18 Jan 2003 linter - fix initial orientation\n 16 Jan 2003 linter - still fixing arms\n 10 Jan 2003 linter - accommodating menu abbreviations\n 10 Jan 2003 drawel12- fix batch flag descriptions\n 30 Nov 2002 linter - fix arms\n 8 Nov 2002 linter - fix rise and fall again\n 2 Nov 2002 linter - note bars\n 30 Oct 2002 linter - fix misinterpretation of zero bar line\n 19 Oct 2002 linter - fix misinterpretation of arms once more\n 27 Sep 2002 linter - fix misinterpretation of arms again\n 5 Sep 2002 drawel - make x rotation inwards\n 5 Sep 2002 drawel - fix single frame bug\n 5 Sep 2002 linter - fix rise and fall\n 5 Sep 2002 linter - fix misinterpretation of arm columns\n 29 Aug 2002 linter - fix misinterpretation of columns\n 16 Jan 2002 viewc - double precision throughout\n 10 Jan 2002 linter - fix close without weight\n 22 Nov 2001 drawel - report when arrays limit animation length\n 28 Sep 2001 drawel - fix shading to after rotation\n 23 Sep 2001 drawel - fix delay routine\n 23 Sep 2001 drawel - delay double buffer swap\n 21 Sep 2001 drawel - fix CreateWindow bug\n 15 Aug 2001 drawel - start with trigonal bipyramids\n 1 Aug 2001 drawel - add variable display slow\n 14 Jun 2001 drawel - fix shading\n 7 Jun 2001 drawel - show frame numbers\n 24 May 2001 drawel - slow down\n 24 May 2001 drawel - try for animation\n 15 May 2001 drawel - trying to get shading right\n 3 May 2001 drawel - get rotations in right order\n 2 May 2001 drawel - enable hidden surface testing\n 24 Mar 2001 drawel - initsphere revised\n 7 Mar 2001 drawel - opengl routine names inserted\n 22 Nov 2001 linter - still trying to point foot in gestures\n 22 Nov 2001 linter - report gestures in output file\n 8 Nov 2001 linter - don't close at end of step\n 1 Nov 2001 linter - fix pointed foot in gestures\n 1 Nov 2001 linter - fix overturning\n 12 Oct 2001 linter - fix stepping on toes\n 15 Aug 2001 linter - fix floor\n 14 Feb 2001 linter - fix closing without weight some more\n 7 Feb 2001 linter - standard output\n 7 Feb 2001 linter - stop calls to routines overlapping\n 31 Jan 2001 linter - get left and right legs sorted out\n 31 Jan 2001 linter - avoid straightening leg closing without weight\n 24 Jan 2001 drawel - interactive and flag options removed for simplicity.\n 17 Aug 2000 drawel - for gl on Viewsonic/Zondata machines.\n 12 Nov 1999 
linter - stop pointing toes when stepping diagonally forward\n 4 Sep 1998 linter - comments about step directions added\n 28 Aug 1998 linter - 'leg[]' used for legs\n 23 Aug 1998 linter - 'stt[]' used for legs\n 31 Jul 1998 linter - check next symbol in column\n 15 Jul 1998 linter - add back routine\n 23 Apr 1998 drawel - display slow\n 17 Apr 1998 drawel - t for translate in z, +/- for slow\n 13 Apr 1998 linter - add date, forward and compress routines\n 12 Apr 1998 linter - simplified NUDES variables\n 23 Mar 1997 linter - fix core dump on dor1234 file\n 23 Mar 1997 linter - optional pixels per frame\n 23 Mar 1997 compl - debug information hidden more\n 18 Mar 1997 compl - optional warnings\n 13 Mar 1997 linter - bent arms\n 17 Jul 1996 prfrm - print command to standard error output\n 7 Feb 1996 prfrm - clean up 'dotouch'\n 3 Feb 1996 prfrm - clear floating exceptions\n 6 Oct 1996 linter - relaxed arms\n 6 Oct 1996 linter - optionally use rightmost staff\n 2 Oct 1996 linter - start at specified bar\n 25 Sep 1996 linter - use relaxed knees\n 13 Sep 1996 linter - written (Don Herbison-Evans)\n 19 Aug 1994 prfrm - remove some goto's\n 13 Aug 1994 prfrm - clean up 'perfrm'\n 21 Jul 1994 compl - fixed 'match' bug\n 29 Apr 1994 prfrm - cleaning up 'drag'\n 28 Apr 1994 compl - variable subroutines permitted\n 22 Sep 1993 prfrm - cleaning up after 'detach' fixed up\n 10 Sep 1993 drawel - accommodate joint information\n 10 Sep 1993 compl - documentation improved\n 16 Aug 1993 compl - 'movjnt' added\n 2 Aug 1993 viewb - allow joint information\n 12 Mar 1993 compl - 'abut' added\n 11 Mar 1993 compl - repeat subroutine calls allowed\n 4 Mar 1993 compl - keyword 'all' turned into 'every'\n 17 Feb 1993 prfrm - 'touch' using Buckdale's algorithm\n 4 Feb 1993 prfrm - 'touch' using scanning\n 28 Oct 1992 prfrm - joint limits added\n 15 Oct 1992 prfrm - viewing transformation separated (view)\n 12 Jun 1992 compl - 'drag' added\n 17 Apr 1992 prfrm - 'touch' fixed\n 26 Feb 1992 drawel - written : for Silicon Graphics Iris\n 12 Feb 1992 compl - allow cubic movements\n 29 Jul 1991 prfrm - print variable values nicely\n 24 Nov 1988 prfrm - more ellipsoids accommodated\n 7 Apr 1987 compl - translate to C, 'copy' command deleted\n 10 Dec 1986 prfrm - translate into C for Digital VAX 780\n 29 Nov 1986 prfrm - write figure for Benesh interpreter\n 25 Nov 1986 compl - write a complete figure\n 17 Oct 1986 prfrm - texture for ellipsoids\n 7 Oct 1986 compl - 'texture' statement added\n 8 Mar 1986 compl - 'print' statement added\n 21 Jul 1985 compl - ellipsoids declared on the fly\n 22 Jun 1985 prfrm - 'grofig' scales about an ellipsoid (Peter Gummer)\n 14 Jun 1985 prfrm - 'spinto' added (Peter Gummer)\n 15 Apr 1985 prfrm - 'setels' simplified (Peter Gummer)\n 12 Apr 1985 compl - simplify subroutine 'join' (Peter Gummer)\n 3 Apr 1985 compl - 'world' added, 'refell' for 'spinby','moveby' (Peter Gummer)\n 3 Jan 1985 prfrm - 'attach', 'detach' work properly (Peter Gummer)\n 23 May 1984 compl - change parsing order\n 7 May 1984 prfrm - 'multiply', 'subtract', 'divide', 'balance' added\n 22 Dec 1983 compl - reduce number of constants in val\n 14 Dec 1983 compl - variable frame numbers\n 12 Dec 1983 prfrm - variable frame numbers\n 25 Nov 1983 prfrm - rotations stored as twist about axis\n 22 Nov 1983 prfrm - separate error message file\n 2 Nov 1983 compl - move integer codes 11-19 to 21-29 (Danuta Kucharska)\n 15 Oct 1983 compl - add 'colour' action (Danuta Kucharska)\n 15 Oct 1983 compl - ellipsoid default colour to flesh 
(Danuta Kucharska)\n 16 Aug 1983 compl - work out range of frames\n 19 Apr 1983 prfrm - insert 'axes', 'link', fix 'detach', 'ground'\n 16 Sep 1982 prfrm - ellipsoid names put in 1st frame\n 19 Jul 1982 prfrm - tolerance put in subroutine rotput\n 31 Mar 1982 compl - 'observe', 'all', 'par' array stored\n 26 Mar 1982 prfrm - single precision version for speed\n 26 Mar 1982 nudes - move to Digital PDP11/34 \n 8 Oct 1981 prfrm - make data structure of figure a list\n 2 Oct 1981 prfrm - 'add','attach','detach','flex','extend', etc. added\n 1 Oct 1981 compl - 'add','touch','attach','detach','flex',etc\n 28 Sep 1981 compl - declare variables to PMAX\n 16 Jul 1981 compl - to negate variables (Nora Neilands)\n 16 Jun 1981 compl - to store orientations and positions (Nora Neilands)\n 29 May 1980 compl - separate compile and perform\n 29 May 1980 nudes - moved to CDC 1800\n 27 Jul 1979 nudes - 'groell','grojnt' added\n 20 Feb 1979 nudes - subroutines added\n 1 Apr 1977 nudes - named constants introduced\n 17 Jan 1977 nudes - bends use arbitrary reference ellipsoid (Bruce McNair)\n 4 Jun 1976 nudes - made interpretive\n 10 May 1976 nudes - input translation separated from drawing\n 1 Apr 1975 nudes - frame numbers used in input language\n 1 Apr 1975 nudes - translated in to Fortran for IBM 7040\n 28 Oct 1974 nudes - use optionally the plotter\n 24 Sep 1974 nudes - verbalize the input language\n 11 Aug 1973 nudes - translated into Fortran for IBM 7040\n 11 Apr 1973 nudes - allow more than one figure\n 1 Dec 1972 nudes - remove hidden lines\n 11 Aug 1972 nudes - originated in Algol on English Electric KDF9 (Don Herbison-Evans) \n\n *******************************************************\n\n Interactive commands :-\n a - continue animating (opp. of 'i')\n b - if frozen, go back one frame else run backwards (opp. of 'f')\n c - continue through restart at full rate (opp. of 'p')\n d - shift down 1% (opp. of 'u')\n f - if frozen, show next forward frame else run forwards (opp.of 'b')\n g - grow scene by 10% (opp. of 's')\n h - show these instructions\n i - freeze (opp. of 'a')\n j - double the number of polygons per sphere {opp. of 'k')\n k - halve the number of polygons per sphere {opp. of 'j')\n l - shift scene left 1% (opp. of 'r')\n n - display of frame numbers (opp. of 'o')\n o - do not display frame numbers (opp. of 'n')\n p - pause on first and last frames (opp. of 'c')\n q - quit\n r - shift scene right 1% (opp. of 'l')\n s - shrink scene by 10% (opp. of 'g')\n t - shift scene away by 10 (opp. of 'T')\n u - shift up 1% (opp. of 'd')\n v - shift away (opp. of 'w')\n w - shift nearer (opp. of 'v')\n x - rotate 10 degrees about x (left - right) axis (opp. of '1')\n y - rotate 10 degrees about y (vertical) axis (opp. of '2')\n z - rotate 10 degrees about z (front - back) axis (opp. 
of '3')\n 0 - reset parameters to default values and freeze at start\n - - delay more between frames \n = - delay less between frames\n\n *************************************\n\n NUDES action commands:\n key no description\n\n debug 22 - set debug parameter to given value\n speed 30 - delete or interpolate extra frames\n view 33 - set range of frames actually to be drawn\n figure 1 - declare ellipsoids that compose given figure\n ellips 2 - declare semi-axis lengths of given ellipsoid\n joint 3 - declare position of given joint\n limit 54 - declare angle limits on joint movements\n variab 32 - declare names of variables\n\n repeat 23 - do the given action at each frame in given range\n linear 25 - generate action over given frames at constant rate\n quadra 24 - generate given action accelerating then decelerating\n to rest quadratically\n cubic 46 - generate given action accelerating then decelerating\n to rest cubically\n accele 5 - generate action linearly accelerating from rest\n decele 10 - generate action linearly decelerating to rest\n\n linkx 49 - store coordinates of a joint\n axes 48 - store semiaxis lengths of an ellipsoid\n angles 36 - store the current orientation angles of an ellipsoid\n centre 37 - store current coordinates of centre of an ellipsoid\n subrou 6 - start a subroutine definition\n end 29 - end of subroutine or main program\n call 28 - call a subroutine\n stop 16 - no more commands after this\n\n colour 50 - colour the named ellipsoid the red, green,\n blue coordinates given\n textur 52 - map given image file onto ellipsoid named\n print 51 - print value of given variable\n ground 18 - make lowest point of given figure touch ground plane\n grofig 11 - scale given figure about centre of given\n ellipsoid, multiplying all parts of figure\n by given factor\n groell 34 - scale a single ellipsoid in size keeping all\n joints fixed\n grojnt 35 - scale a single ellipsoid in size keeping a\n nominated joint of it fixed, and allowing its\n centre, other joints and connected ellipsoids\n to move appropriately\n moveby 13 - move given figure by given amounts parallel to\n the axes of given reference ellipsoid\n moveto 27 - move given figure so that centre of given\n ellipsoid is at given position\n movjnt 56 - move a joint relative to the centre of an\n ellipsoid\n spinby 17 - spin given figure about centre of given ellipsoid,\n rotating by given angle about given axis in given\n reference ellipsoid\n spinto 12 - spin given figure about centre of given ellipsoid,\n rotating given ellipsoid to given angles relative\n to axes in given reference ellipsoid\n bendby 19 - bend figure by given angle about given axis in\n given reference ellipsoid at given joint,\n rotating the given moving ellipsoid and all\n those joined to it\n bendto 21 - bend figure at given joint, rotating given\n ellipsoid to given angles relative to axes in\n given reference ellipsoid\n flex 38 - bend given joint about x axis\n rotate 39 - bend given joint about y axis inwards\n abduct 40 - bend given joint about z axis away from body\n drag 53 - bend second (moving) ellipsoid at given joint about\n its given axis so that first ellipsoid\n touches the ground, while ensuring that the rest\n of the figure is grounded also\n touch 15 - make first ellipsoid touch the second ellipsoid\n by bending the third ellipsoid at the given joint\n about the given axis of the third ellipsoid.\n abut 55 - make ellipsoid1 touch ellipsoid2 by moving its\n figure parallel to given axis of ellipsoid3\n balanc 7 - bend at given 
joint about given axis of reference\n ellipsoid to balance the moving set of ellipsoids\n with respect to y axis gravity\n set 20 - set a value to a named variable\n invert 31 - divide 1 by value\n negate 41 - negate the value of a named variable\n add 14 - add two variables or values to give a variable\n subtra 42 - subtract second value from first to give a variable\n multip 44 - multiply two values to give a variable\n divide 43 - divide second value into first to give a variable\n attach 8 - join 2 figures at the point on the second\n ellipsoid which is shifted from its centre\n by the given coordinates\n detach 9 - break a figure into 2 at given joint, naming\n the figure containing the given ellipsoid by\n the given name, and keeping the old figure name\n for the other part of the old figure\n observ 26 - set eulerian angles of direction of observer\n place 47 - set centre of view of observer\n\n ****************************************************\n\n NUDES syntax:\n\n (nudesscript) = (declarations) (movements) (stop)\n\n where (declarations) =\n (declaration)\n (declarations)\n\n where (declaration) =\n debug (integer)\n speed (multiplier)\n view (framestart) (framestop)\n figure (figurename) (ellipsoidcount) (ellipsoidnamelist)\n ellips (ellipsoidname) (xvalue) (yvalue) (zvalue)\n joint (jointname) (ellipsoidname) (xvalue) (yvalue) (zvalue)\n (ellipsoidname) (xvalue) (yvalue) (zvalue)\n limit (jointname) (xvalue) (yvalue) (zvalue)\n variab (variablecount) (variablenamelist)\n subrou (subroutinename)\n endsub (subroutinename)\n\n where (movements) =\n (movement)\n (movements)\n\n where (movement) =\n call (framestart) (framestop) (sname)\n repeat (framestart) (framestop) (action)\n linear (framestart) (framestop) (action)\n quadra (framestart) (framestop) (action)\n accele (framestart) (framestop) (action)\n decele (framestart) (framestop) (action)\n cubic (framestart) (framestop) (action)\n\n where (action) =\n debug (level)\n call (sname)\n colour (ename) (redvalue) (greenvalue) (bluevalue)\n textur (ename) (iname) (xoffset) (yoffset)\n grofig (fname) (ename) (xfactor) (yfactor) (zfactor)\n groell (ename) (xfactor) (yfactor) (zfactor)\n grojnt (ename) (jname) (xfactor) (yfactor) (zfactor)\n movjnt (jname) (ename) (x) (y) (z)\n moveto (fname) (ename) (x) (y) (z)\n moveby (fname) (referenceellipsoid) (x) (y) (z)\n ground (fname)\n spinto (fname) (ename) (referenceellipsoid)\n (colatitude) (longditude) (twist)\n spinby (fname) (ename) (referenceellipsoid) (angle) (axis)\n bendto (movingellipsoid) (jname) (referenceellipsoid)\n (colatitude) (longditude) (twist)\n bendby (movingellipsoid) (jname) (referenceellipsoid)\n (angle) (axis)\n flex (movingellipsoid) (jname) (angle)\n rotate (movingellipsoid) (jname) (angle)\n abduct (movingellipsoid) (jname) (angle)\n drag (ename) (movingellipsoid) (jname)\n (referenceellipsoid) (axis)\n touch (mvell) (stillell) (movingellipsoid) (referenceellipsoid) (jname) (axis)\n balanc (movingellipsoid) (jname) (referenceellipsoid) (axis)\n detach (movingellipsoid) (jname) (fname)\n attach (ename) (jname) (ename) (x) (y) (z)\n set (variablename) (anything)\n invert (variablename)\n negate (variablename)\n add (variablename) (x) (x)\n subtra (variablename) (x) (x)\n multip (variablename) (x) (x)\n divide (variablename) (x) (x)\n angles (ename) (referenceellipsoid)\n (variablename) (variablename) (variablename)\n centre (ename) (variablename) (variablename) (variablename)\n axes (ename) (variablename) (variablename) (variablename)\n linkx (jname) 
(variablename) (variablename) (variablename)\n observe (angle) (angle) (angle)\n place (x) (y) (z)\n write (fname)\n print (variablename)\n\n where (ellipsoidnamelist) =\n (ellipsoidname) (ellipsoidnamelist)\n (ellipsoidname)\n\n where (subroutinenamelist) =\n (subroutinename) (subroutinenamelist)\n (subroutinename)\n\n where (variablenamelist) =\n (variablename) (variablenamelist)\n (variablename)\n\n where (x),(y),(z),(angle),\n (colatitude),(longditude),(twist),\n (xfactor),(yfactor),(zfactor),\n (redvalue),(greenvalue),(bluevalue),\n (xoffset),(yoffset) =\n (variablename)\n (value)\n\n where (framestart),(framestop) =\n (positiveinteger)\n\n where (positiveinteger) =\n (digit)(positiveinteger)\n (digit)\n\n where (level),(multiplier) =\n (integer)\n\n where (integer) =\n (positiveinteger)\n -(positiveinteger)\n\n where (value),(xvalue),(yvalue),(zvalue) =\n (integer).(positiveinteger)\n (integer)\n\n where (fname) =\n every\n world\n (figurename)\n (variablename)\n\n where (ename),(stillell),(mvell),\n (movingellipsoid),(referenceellipsoid) =\n world\n (ellipsoidname)\n (variablename)\n\n where (iname) =\n (imagename)\n (variablename)\n\n where (jname) =\n (jointname)\n (variablename)\n\n where (sname) =\n (subroutinename)\n (variablename)\n\n where (prefix) =\n (non-space character)\n\n where (oldfigurename),(newfigurename),\n (figurename),(ellipsoidname),\n (jointname),(subroutinename),\n (variablename),(imagename) =\n (up to 6 non-space characters)\n\n where (axis) =\n (variablename)\n x\n y\n z\n\n where (anything) =\n (value)\n (axis)\n (ellipsoidname)\n (jointname)\n (figurename)\n (subroutinename)\n (variablename)\n\n where (stop) =\n stop\n\n*************************************************************/\n\n#include <stdio.h>\n#include <string.h>\n#include <stdlib.h> \n#include <math.h>\n#include <math.h>\n#include <ctype.h> \n\ntypedef char TCHAR;\n\n#define TRUE 0\n#define FALSE 1\n#define DONE 0\n#define TODO 1\n#define MAN 0\n#define WOMAN 1\n#define MAXINT 1073741824\n#define WINDOW_MODE 1\n#define GLUT_KEY_ESCAPE 27 \n#define BMAX 256 // size of character buffer\n#define EMAX 1024 // maximum number of ellipsoids\n//#define FACTOR 2 // number of y pixels per frame\n#define FMAX 2048 // maximum number of frames\n#define NKEYS 64 // number of keywords\n#define PMAX 6000 // maximum number of actions to perform\n#define SMAX 100 // maximum number of chords around sphere\n#define SMIN 2 // minimum number of chords around sphere\n#define SSTART 20 // initial number of chords around sphere\n\n#define LMAX 5000 // max number of laban score entries \n#define TMAX 30 // max number of staff lines\n#define VMAX 2048 // max number of constants + variables\n#define NCOLM 18 // number of columns around staff\n#define STEP 12 // spacing of symbols \n#define WIDTH 1024 // width of the score \n#define NSYMS 25 // max number of items in each menu\n\n#define RELAX 1 // item number of 'relaxed' symbol\n#define BENT 3 // item number of 'bent' symbol\n#define STRAIGHT 2 // item number of 'straight' symbol\n#define STRETCH 4 // item number of 'stretched' symbol \n#define FRONT 100 // front symbol found\n#define BACK 200 // back symbol found\n#define MLHAND 1 // man's left hand symbol found\n#define MRHAND 2 // man's right hand symbol found\n#define WLHAND 10 // woman's left hand symbol found\n#define WRHAND 20 // woman's right hand symbol found\n#define ARM 'a' // arm found in colm[]\n#define CHEST 'c' // chest found in colm[]\n\n#define LOW 0\n#define MED 1\n#define HIGH 2\n#define BLANK 
3\n\n#define NO 0 // no hold\n#define CL 1 // closed hold: normal ballroom dancing position.\n#define PR 2 // promenade position: facing partner, bodies touching,\n // but both prepared to travel to man's L.\n#define CP 3 // counter promenade position: facing partner, bodies touching,\n // but both prepared to travel to man's R.\n#define DB 4 // double hold: facing partner, bodies apart,\n // L hand to R, R hand to L.\n#define OP 5 // open hold: facing partner, bodies apart,\n // man's L to lady's R, other hands free.\n#define CR 6 // crossed open hold: facing partner, bodies apart,\n // man's R to lady's R, other hands free.\n#define OE 7 // open extended hold: both facing same way, bodies apart,\n // man's L hand to lady's R, other hands free.\n#define CO 8 // counter open extended hold: both facing same way, bodies apart,\n // man's R hand to lady's L, other hands free.\n#define SH 9 // shadow hold: both facing same way, bodies touching,\n // L hand to L, R hand to R.\n#define SS 10 // semi-shadow hold: both facing same way, bodies touching, \n // man's L hand to lady's L,\n // man's R hand on lady's R hip, lady's R hand free.\n\n// ini file variables-\n\nint x;\nchar ini_title[256][32];\nchar ini_value[256][128];\nint max_ini = 256;\nint max_ini_len = 32;\nint number_ini;\nint ini_diag = 0;\nint input_file_type;\nint lbn_fps = -1;\nint lbn_bpm = -1;\nint lbn_ppb = 23;\nint lbn_figures = 1; // number of staves to be interpreted\nint time;\nint nextcount = 0;\nbool output_file_id;\n\n// symbol menus-\nchar Area ='A';\nchar Bars ='B';\nchar Dirn ='D';\nchar Face ='F';\nchar Path ='H';\nchar Keys ='K';\nchar Limb ='L';\nchar Misc ='M';\nchar Pins ='P';\nchar Rotn ='R';\nchar Stav ='S';\nchar Volm ='V';\n\nbool forbid[EMAX][EMAX];\n\ndouble doub0;\ndouble doub1;\ndouble doub2;\ndouble doub3;\ndouble doub4;\ndouble doub10;\ndouble doub60;\ndouble doub90;\ndouble doub150;\ndouble doub179;\ndouble doub180;\ndouble doub181;\ndouble doub255;\ndouble doub360;\ndouble doub500;\ndouble doubmax;\ndouble inv2;\ndouble inv3;\ndouble inv4;\ndouble inv5;\ndouble inv6;\ndouble inv10;\ndouble inv256;\ndouble inv1000;\ndouble lg2; //logarithm of 2 \ndouble rt3;\ndouble tolr;\ndouble twopi;\ndouble pi; // 3.142...etc /\ndouble piby2;\ndouble degree; // number of degrees in a radian \ndouble radian; // number of radians in a degree\ndouble rad; // conversion factor from degrees to radians \ndouble radten; // conversion factor from tenths of a degree to radians \ndouble alpha; // basic interactive angle increment\ndouble anglex,angley,anglez; // interactive rotation angles \ndouble dangx,dangy,dangz; // interactive rotation angle increments \n//double fac; // lbn conversion factor from y to frames \ndouble scale; // interactive scaling factor \ndouble SCALE = 1.0; // default scaling to fit window\ndouble tx,ty,tz; // interactive translations\n//double x1a,x1b,x2a,x2b;\n//double y1a,y1b,y2a,y2b;\n//double x1s,x2s,y1s,y2s;\ndouble frac; // fraction of action to be done in this frame\ndouble prop; // proportion of action yet to be done \ndouble step1,step2;\ndouble v;\ndouble varval; // varval - value of variable\ndouble ang[3]; // the 3 eulerian angles \ndouble oldang[3];\ndouble obsang[3];\ndouble factor[3]; // factors in x,y, and z directions \ndouble lighting_rgb[3];\ndouble pplace[3]; // position of centre of observers attention\ndouble semiax[3]; // coordinate triple read from input \ndouble xx[3]; // x,y and z values (x&y used for commands add, subtract,multiply,divide)\ndouble val[VMAX]; // val[i] - if i 
<= nvals then value of ith constant\n // otherwise (s - i+1)th variable \ndouble maxax[EMAX]; // maxax[j] - largest semiaxis of jth ellipsoid \ndouble minax[EMAX];\ndouble pres[EMAX];\ndouble obs[3][3]; // rotation matrix applied to all ellipsoids to obtain observers view \ndouble col[EMAX][3]; // col[i][j] - colour of ell i; j = 0 red, 1 green, 2 blue \ndouble quasav[EMAX+2][5];\ndouble ax[EMAX][3]; // ax3[i][j] - length of jth semiaxis of ith ellipsoid\ndouble cen[EMAX][3]; // cen[i][j] - jth coordinate of centre of ith ellipsoid\ndouble censav[EMAX][3];\ndouble dcon[EMAX][2][3]; // distances of joint from ellipsoid centres\ndouble jnt[EMAX][3]; // coordinates of joints between ellipsoids \ndouble jntsav[EMAX][3];\ndouble ob3[FMAX][3]; // observation angles of each frame\ndouble pl3[FMAX][3]; // centre of view of each frame\ndouble norm[4*SMAX*SMAX][3]; // normals at sphere faces \ndouble sph[4*SMAX*SMAX][4][3];// vertices of facets of sphere \ndouble lim[EMAX][3][2];\ndouble co3[FMAX][EMAX][3]; // colours of ellipsoids\ndouble ce3[FMAX][EMAX][3]; // coordinates of ellipsoid centres \ndouble ax3[FMAX][EMAX][3]; // ellipsoid semiaxis lengths \ndouble qu3[FMAX][EMAX][4]; // quaternions of ellipsoids \ndouble quat[EMAX+2][5]; // quat[i][j] - angle of ith ellipsoid \n // j = 0,1,2 - components of direction of rotation axis \n // j = 3,4 - sine and cosine of rotation about axis \ndouble point[SMAX][2*SMAX+1][3];\n\n\n\nstruct Symbol {\n\tint a; // TRUE = 0 if already done\n\tint b; // bent indicator\n\tint c; // column position relative to right support column\n\tint h; // height\n\tint i; // item in menu\n\tint l; // lbn file line number\n\tchar m; // menu\n\tint s; // drawing step size\n\tint w; // width\n\tint x; // horizontal position of left side\n\tint y; // vertical position of bottom\n\tint x2; // horizontal position of right side\n\tint y2; // vertical position of top\n\tint d; // height indicator\n };\n\nstruct Symbol lbn[LMAX]; // laban score entries\nint ja; // TRUE = 0 if already done\nint jb; // bendedness of current symbol\nint jc; // current symbol column\nint jh; // current symbol height\nint ji; // current symbol item in menu\nint jl; // line of current symbol\nchar jm; // current symbol menu\nint js; // current symbol step size\nint jw; // current symbol width\nint jx; // current symbol x bottom\nint jy; // current symbol y bottom \nint jx2; // current symbol x top\nint jy2; // current symbol y top\nint jd; // current symbol shading\n/*************************************************************/\n\n// linter variables -\nbool mspace,wspace;\n\ndouble lbn_fpp; // frames per pixel\n\nint blength; // number of bars to interpret\nint bpm; // beats per minute\nint bstart; // bar to start at\nint lcentre; // x position of centre staff line\nint complete; // true if Gloria and Frank to be used\nint dofig; // required gender of current staff\nint dostaff; // index in staff[] of current staff\nint facedif; // difference between facing directions of man and lwoman\nint facecl; // facing score of closed position\nint facepr; // facing score of promenade position\nint facesh; // facing score of shadow position\nint facess; // facing score of semishadow position\nint fbegin,ffin,flen;// start,end, and length of a position\nint fend; // frame number of end of current movement\nint fhalf; // frame halfway through a movement\nint f_max; // maximum frame number\nint fps; // frames/second\nint frange; // number of frames in an action\nint frperbar; // frames per bar;\nint fstart; // first 
frame number of action\nint gy,gh; // arm gesture range disabled by contact bow\nint haslbn; // TRUE if input is lbn file, FALSE for .n file\nint hold; // one of the defined holds NO,CL,PR,CP,DB,OP,CR,OE,CO,SH,SS\nint holdcl; // closed hold counter \nint holdco; // counter open extended hold counter\nint holdoe; // open extended hold counter\nint holdpr; // promenade hold counter\nint holdsh; // shadow hold counter\nint holdss; // semishadow hold counter\nint j; // counter through symbols\nint keptf; // last frame when last position kept\nint mface,wface; // facing directions of man and woman\nint nbar; // number of current bar\nint nlabs; // number of laban score entries\nint npins; // number of pins below first barline\nint nm; // number of men\nint nw; // number of women\nint nmw; // nm * nw\nint nstaff; // number of staves\nint oriented; // true after orientation\nint pend; // last frame of previous action\nint pstart; // first fame of previous action\nint ppb; // pixels per beat (= 23 );\nint prev_time; // clock reading of previous frame\nint pres_time; // clock reading of current frame\nint previ; // item of previous support symbol\nint prevc; // column of previous support symbol\nint prevhold; // previous hold\nint rise; // height of previous step;\nint ssend; // ending score symbol\nint sstart; // starting score symbol\nint st; // current staff number\nint stmiddle; // halfway across between L and R staves\nint track; // TRUE when tracking viewpoint on main figure\nint xmin,xmax; // width range of score symbols\nint ymax; // top of score\nint yend; // y position of last movement\nint ystart; // y position of start of movement\nint yj[5*FMAX]; // symbols starting at given y positions\nint pins[TMAX][2]; // index and use of initial pins\nint staff[TMAX][6]; // index, x positions, gender, and use of staves\nchar colm[NCOLM]; // limb presigns in the columns\n\n//nudes variables -\nint axis; // axis of next rotation\nint bnums; // TRUE if bar numbers to be displayed \nint comand; // counter through all commands.\nint df; // interactive frame increment \nint ecount; // number of entries in 'elist'\nint ell1; // ellipsoid to touch something \nint ell2; // ellipsoid to be touched \nint ellpsd; // active ellipsoid\nint f; // counter through frames \nint fast; // multiplier of frame numbers\nint fig; // current figure \nint fnums; // TRUE if frame numbers to be displayed \nint forward; // TRUE for animation to go forwards \nint freeze; // TRUE if animation frozen \nint fstop; // last frame number of actions\nint fslow;\nint height = 512; // height of window in pixels\nint hstart; // frame at start of hold\nint hend; // frame at end of hold\nint inmain; // TRUE if still in main NUDES program \nint intersect;\nint jcount ;\nint join; // joint for current bend command \nint k;\nint length; // length of next input string \nint lline; // length of next input line \nint maxint; // largest representable integer\nint more; // if > 0 means more actions for which stp>=fr \nint ne; // number of ellipsoids in current frame\nint nesave;\nint nfaces; // number of faces on sphere \nint nfigs; // number of figures\nint nfiles; // number of texture map files\nint njts; // number of joints \nint nline; // number of current nudes file line\nint npfs; // number of actions \nint nsph; // number of chords around sphere \nint nsubs; // number of subroutines \nint nvals; // number of values in 'val' \nint nvars; // number of variables in array val \nint ok; // ok = 0 if ok, else problem reference number \nint p; // 
counter through actions\nint pause; // TRUE if pausing on 1st and last frames \nint pok; // true if positive integer read \nint prdone; // TRUE if diagnostic printing already done\nint ptype; // code of current action \nint pp;\nint donesurf; // TRUE if 'surf' called from 'dotouch'\nint refell; // ellipsoid used as angular reference \nint shadow; // TRUE if shadows wanted\nint single; // either TODO or DONE when frozen\nint slow; // number of pause calls between animating frames \nint start; // pointer to next character on current input line \nint t; // type of current action \nint var0;\nint var1;\nint var2;\nint vstart; // first frame from view command\nint vstop; // last frame from view command\nint width = 512; // height of window \nint xw = 10;\nint yw = 10; // lower left corner of window \nint newcol[3];\nint axlen[EMAX]; // lengths of names\nint ellen[EMAX];\nint figlen[EMAX];\nint jntlen[EMAX];\nint fillen[EMAX];\nint keylen[NKEYS];\nint sublen[PMAX];\nint varlen[PMAX];\nint called[PMAX]; // true if subroutine is called \nint cline[PMAX]; // line numbers in input file of each action \nint coel[EMAX][2]; // the 2 ellipsoids joined at a joint \nint defined[PMAX]; // TRUE if subroutine is defined \nint distrn[PMAX]; // how actions are distributed over frames \nint ellfig[EMAX]; // number of the figure containing each ellipsoid\nint elist[EMAX]; // array for lists of ellipsoids in current action \nint figell[EMAX]; // figell[i] - first ellipsoid in ith figure \nint frames[FMAX]; // original NUDES frame numbers\nint frstart[PMAX]; // frstart[i] - frame number of start of ith action \nint frstop[PMAX]; // frstop[i] - frame number of end of ith action \nint jlist[EMAX]; // array for lists of joints in current action \nint knee[EMAX]; // knee[j] - true if jth joint is a knee i.e. 
flexes backwards \nint nels[FMAX]; // number of ellipsoids in each frame \nint type[PMAX]; // type of action \nint pf[PMAX][6]; // pf[i][j] - jth parameter of ith action- +ve: itself, -ve: index into array val \nint subact[PMAX][2]; // subact[i][] - action numbers of start and end of ith subroutine\nint usevar[PMAX]; // 0 if variable not used \nint order[3][3][3] = {\n { {2,1,1},{1,3,4},{1,5,3} },\n { {3,1,5},{1,2,1},{4,1,3} },\n { {3,4,1},{5,3,1},{1,1,2} }};\nint perm[3][3][3] = {\n { {2,1,1},{1,3,4},{1,5,3} },\n { {3,1,5},{1,2,1},{4,1,3} },\n { {3,4,1},{5,3,1},{1,1,2} }};\n/*\n keyword codes -\n*/\n int figure_keyword_code= 1;\n int ellips_keyword_code= 2;\n int joint_keyword_code= 3;\n int accele_keyword_code= 5;\n int subrou_keyword_code= 6;\n int balanc_keyword_code= 7;\n int attach_keyword_code= 8;\n int detach_keyword_code= 9;\n int decele_keyword_code= 10;\n int grofig_keyword_code= 11;\n int spinto_keyword_code= 12;\n int moveby_keyword_code= 13;\n int add_keyword_code= 14;\n int touch_keyword_code= 15;\n int stop_keyword_code= 16;\n int spinby_keyword_code= 17;\n int ground_keyword_code= 18;\n int bendby_keyword_code= 19;\n int set_keyword_code= 20;\n int bendto_keyword_code= 21;\n int dodebug_keyword_code= 22;\n int repeat_keyword_code= 23;\n int quadra_keyword_code= 24;\n int linear_keyword_code= 25;\n int observ_keyword_code= 26;\n int moveto_keyword_code= 27;\n int call_keyword_code= 28;\n int endsub_keyword_code= 29;\n int speed_keyword_code= 30;\n int invert_keyword_code= 31;\n int variable_keyword_code = 32;\n int view_keyword_code= 33;\n int groell_keyword_code= 34;\n int grojnt_keyword_code= 35;\n int angles_keyword_code= 36;\n int centre_keyword_code= 37;\n int flex_keyword_code= 38;\n int rotate_keyword_code= 39;\n int abduct_keyword_code= 40;\n int negate_keyword_code= 41;\n int subtra_keyword_code= 42;\n int divide_keyword_code= 43;\n int multip_keyword_code= 44;\n int cubic_keyword_code= 46;\n int place_keyword_code= 47;\n int axes_keyword_code= 48;\n int linkx_keyword_code= 49;\n int colour_keyword_code= 50;\n int print_keyword_code= 51;\n int textur_keyword_code= 52;\n int drag_keyword_code= 53;\n int limits_keyword_code= 54;\n int abut_keyword_code= 55;\n int movjnt_keyword_code= 56;\n int growto_keyword_code= 57;\n int color_keyword_code= 58;\n int center_keyword_code= 59;\n int opacty_keyword_code= 60;\n int lghtng_keyword_code= 61;\n int allow_keyword_code= 62;\n int forbid_keyword_code= 63;\n\n\nFILE *infile;\nFILE *nudesfile;\nFILE *figsfile;\n\nchar* junk[BMAX];\n\nchar buf[BMAX]; // input buffer\nchar line[BMAX]; // compl input buffer \nchar lbnline[LMAX][BMAX]; // lbn file lines\nchar string[BMAX]; // next set of non-blank characters from data file */\nchar name[BMAX]; // name of input file\nchar finname[BMAX]; // name of input file\nchar figsname[BMAX]; // name of lintel nudes figures, declarations, and subroutines file */\nchar nudesname[BMAX]; // name of intermediate nudes file\nchar ptitle[BMAX]; // program title\nchar risesub[3][5] = {{'f','l','o','w'},\n {'f','m','e','d'},\n {'f','h','i','g','h'}};\nchar xyz[2][10] = {{'m','x',' ','m','y',' ','m','z'},\n {'w','x',' ','w','y',' ','w','z'}};\nchar aline[PMAX][BMAX]; // nudes input lines\nchar tname[EMAX][BMAX]; // name of texture map file\nchar jname[EMAX][BMAX]; // joint names \nchar sname[EMAX][BMAX]; // subroutine names\nchar vname[EMAX][BMAX]; // variable names\nchar axnam[EMAX][BMAX]; // first entry is the set of axis names 'x','y','z'. 
The rest are null\nchar ename[EMAX][BMAX]; // ellipsoid names \nchar fname[EMAX][BMAX]; // figure names \nchar tn3[FMAX][EMAX][BMAX];// names of reduced texture map files\nchar null = '\\0';\nchar blank = ' ';\nchar dig[11] = {'0','1','2','3','4','5','6','7','8','9','*'};\nchar dummy[6] = {'d','u','m','m','y'};\nchar every[6] = {'e','v','e','r','y'};\nchar nudes[6] = {'n','u','d','e','s'};\nchar world[6] = {'w','o','r','l','d'};\nchar variab[6]= {'v','a','r','i','a','b'};\nchar expect[9][10] = { {' '},\n {'v','a','l','u','e'},\n {'e','l','l','i','p','s','o','i','d'},\n {'j','o','i','n','t'},\n {'f','i','g','u','r','e'},\n {'a','x','i','s'},\n {'s','u','b','r','o','u','t','i','n','e'},\n {'v','a','r','i','a','b','l','e','s'},\n {'s','t','r','i','n','g'} };\n/*\n par[p,k] - the type of the kth parameter of the pth action -\n 0 none expected\n 1 numeric value or variable name\n 2 ellipsoid name\n 3 joint name\n 4 figure name\n 5 axis name\n 6 subroutine name\n 7 variable name\n 8 anything\n 9 image file name\n*/\nint par[NKEYS][6] = {\n {0,0,0,0,0,0},// 0\n {0,0,0,0,0,0},// 1 figure\n {0,0,0,0,0,0},// 2 ellips\n {0,0,0,0,0,0},// 3 joint\n {0,0,0,0,0,0},// 4 \n {1,0,0,0,0,0},// 5 accele\n {0,0,0,0,0,0},// 6 subrou \n {2,3,2,5,0,0},// 7 balanc\n {2,3,2,1,1,1},// 8 attach\n {2,3,4,0,0,0},// 9 detach\n {1,0,0,0,0,0},// 10 decele\n {4,2,1,1,1,0},// 11 grofig\n {4,2,2,1,1,1},// 12 spinto\n {4,2,1,1,1,0},// 13 moveby\n {7,1,1,0,0,0},// 14 add\n {2,2,2,2,3,5},// 15 touch\n {0,0,0,0,0,0},// 16 stop\n {4,2,2,1,5,0},// 17 spinby\n {4,0,0,0,0,0},// 18 ground\n {2,3,2,1,5,0},// 19 bendby\n {7,8,0,0,0,0},// 20 set\n {2,3,2,1,1,1},// 21 bendto\n {1,0,0,0,0,0},// 22 dodebug\n {1,0,0,0,0,0},// 23 repeat\n {1,0,0,0,0,0},// 24 quadra\n {1,0,0,0,0,0},// 25 linear\n {1,1,1,0,0,0},// 26 observ\n {4,2,1,1,1,0},// 27 moveto\n {8,0,0,0,0,0},// 28 call\n {0,0,0,0,0,0},// 29 endsub\n {0,0,0,0,0,0},// 30 speed\n {7,0,0,0,0,0},// 31 invert\n {0,0,0,0,0,0},// 32 variable\n {0,0,0,0,0,0},// 33 view\n {2,1,1,1,0,0},// 34 groell\n {2,3,1,1,1,0},// 35 grojnt\n {2,2,7,7,7,0},// 36 angles\n {2,7,7,7,0,0},// 37 centre\n {2,3,1,0,0,0},// 38 flex\n {2,3,1,0,0,0},// 39 rotate\n {2,3,1,0,0,0},// 40 abduct\n {7,0,0,0,0,0},// 41 negate\n {7,1,1,0,0,0},// 42 subtra\n {7,1,1,0,0,0},// 43 divide\n {7,1,1,0,0,0},// 44 multiply\n {0,0,0,0,0,0},// 45\n {0,0,0,0,0,0},// 46 cubic\n {1,1,1,0,0,0},// 47 place\n {2,7,7,7,0,0},// 48 axes\n {3,7,7,7,0,0},// 49 linkx\n {2,1,1,1,0,0},// 50 colour\n {7,0,0,0,0,0},// 51 print\n {2,9,1,1,0,0},// 52 texture\n {2,2,3,2,5,0},// 53 drag\n {0,0,0,0,0,0},// 54 limits\n {2,2,2,5,0,0},// 55 abut\n {3,2,1,1,1,0},// 56 movjnt\n\t\t{4,2,1,1,1,0},// 57;\n\t\t{2,1,1,1,0,0},// 58;\n\t\t{2,7,7,7,0,0},// 59;\t\n\t\t{2,1,0,0,0,0},// 60;\n\t\t{1,1,1,0,0,0},// 61;\n\t\t{2,2,0,0,0,0},// 62;\n\t\t{2,2,0,0,0,0}};// 63;\n\nchar keynam[NKEYS][BMAX] = {\n {'k','e','y','w','o','r','d'}, // 0\n {'f','i','g','u','r','e'}, // 1\n {'e','l','l','i','p','s','o','i','d'}, // 2\n {'j','o','i','n','t'}, // 3\n {'c','o','p','y'}, // 4\n {'a','c','c','e','l','e','r','a','t','e'}, // 5\n {'s','u','b','r','o','u','t','i','n','e'}, // 6\n {'b','a','l','a','n','c','e'}, // 7\n {'a','t','t','a','c','h'}, // 8\n {'d','e','t','a','c','h'}, // 9\n {'d','e','c','e','l','e','r','a','t','e'}, // 10\n {'g','r','o','f','i','g'}, // 11\n {'s','p','i','n','t','o'}, // 12\n {'m','o','v','e','b','y'}, // 13\n {'a','d','d'}, // 14\n {'t','o','u','c','h'}, // 15\n {'s','t','o','p'}, // 16\n {'s','p','i','n','b','y'}, // 17\n {'g','r','o','u','n','d'}, // 
18\n {'b','e','n','d','b','y'}, // 19\n {'s','e','t'}, // 20\n {'b','e','n','d','t','o'}, // 21\n {'d','e','b','u','g'}, // 22\n {'r','e','p','e','a','t'}, // 23\n {'q','u','a','d','r','a','t','i','c'}, // 24\n {'l','i','n','e','a','r'}, // 25\n {'o','b','s','e','r','v','e'}, // 26\n {'m','o','v','e','t','o'}, // 27\n {'c','a','l','l'}, // 28\n {'e','n','d','s','u','b'}, // 39\n {'s','p','e','e','d'}, // 30\n {'i','n','v','e','r','t'}, // 31\n {'v','a','r','i','a','b','l','e','s'}, // 32\n {'v','i','e','w'}, // 33\n {'g','r','o','e','l','l'}, // 34\n {'g','r','o','j','n','t'}, // 35\n {'a','n','g','l','e','s'}, // 36\n {'c','e','n','t','r','e'}, // 37\n {'f','l','e','x'}, // 38\n {'r','o','t','a','t','e'}, // 39\n {'a','b','d','u','c','t'}, // 40\n {'n','e','g','a','t','e'}, // 41\n {'s','u','b','t','r','a','c','t'}, // 42\n {'d','i','v','i','d','e'}, // 43\n {'m','u','l','t','i','p','l','y'}, // 44\n {'r','e','a','d'}, // 45\n {'c','u','b','i','c'}, // 46\n {'p','l','a','c','e'}, // 47\n {'a','x','e','s'}, // 48\n {'l','i','n','k','x'}, // 49\n {'c','o','l','o','u','r'}, // 50\n {'p','r','i','n','t'}, // 51\n {'t','e','x','t','u','r','e'}, // 52\n {'d','r','a','g'}, // 53\n {'l','i','m','i','t'}, // 54\n {'a','b','u','t'}, // 55\n {'m','o','v','j','n','t'},\t\t\t\t // 56\n\t\t{'g','r','o','w','t','o'}, // 57\n\t\t{'c','o','l','o','r'}, // 58\n\t\t{'c','e','n','t','e','r'}, // 59\n\t\t{'o','p','a','c','t','y'}, // 60\n\t\t{'l','g','h','t','n','g'}, // 61\n\t\t{'a','l','l','o','w'}, // 62\n\t\t{'f','o','r','b','i','d'} }; // 63\n/*\n code[p,k] - type of kth parameter of pth action using -\n\n 0-illegal\n 1-x coordinate\n 2-y coordinate\n 3-z coordinate\n 4-angle 1\n 5-angle 2\n 6-angle 3\n 7-x scaling factor\n 8-y scaling factor\n 9-z scaling factor\n 10-value for a variable\n 11,12,13-red green and blue colour coords, respectively,\n or image texture file reference ,xoffset and yoffset\n 14-debug parameter\n\n 21-axis\n 22-joint\n 23-reference ellipsoid\n 24-moving or central ellipsoid\n 25-figure\n 27,28,29-names of variables\n 30-touching or dragged ellipsoid (ell1)\n 31-touched ellipsoid (ell2)\n*/\nint code[NKEYS][6] = {\n {0,0,0,0,0,0}, // 0\n {0,0,0,0,0,0}, // 1\n {0,0,0,0,0,0}, // 2\n {0,0,0,0,0,0}, // 3\n {0,0,0,0,0,0}, // 4\n {0,0,0,0,0,0}, // 5\n {0,0,0,0,0,0}, // 6\n {24,22,23,21,0,0}, // 7\n {24,22,23,1,2,3}, // 8\n {24,22,25,0,0,0}, // 9\n {0,0,0,0,0,0}, // 10\n {25,24,7,8,9,0}, // 11\n {25,24,23,4,5,6}, // 12\n {25,23,1,2,3,0}, // 13\n {27,1,2,0,0,0}, // 14\n {30,31,24,23,22,21},// 15\n {0,0,0,0,0,0}, // 16\n {25,24,23,4,21,0}, // 17\n {25,0,0,0,0,0}, // 18\n {24,22,23,4,21,0}, // 19\n {27,10,0,0,0,0}, // 20\n {24,22,23,4,5,6}, // 21\n {14,0,0,0,0,0}, // 22\n {0,0,0,0,0,0}, // 23\n {0,0,0,0,0,0}, // 24\n {0,0,0,0,0,0}, // 25\n {4,5,6,0,0,0}, // 26\n {25,24,1,2,3,0}, // 27\n {0,0,0,0,0,0}, // 28\n {0,0,0,0,0,0}, // 29\n {0,0,0,0,0,0}, // 30\n {27,0,0,0,0,0}, // 31\n {0,0,0,0,0,0}, // 32\n {0,0,0,0,0,0}, // 33\n {24,7,8,9,0,0}, // 34\n {24,22,7,8,9,0}, // 35\n {24,23,27,28,29,0}, // 36\n {24,27,28,29,0,0}, // 37\n {24,22,4,0,0,0}, // 38\n {24,22,4,0,0,0}, // 39\n {24,22,4,0,0,0}, // 40\n {27,0,0,0,0,0}, // 41\n {27,1,2,0,0,0}, // 42\n {27,1,2,0,0,0}, // 43\n {27,1,2,0,0,0}, // 44\n {0,0,0,0,0,0}, // 45\n {0,0,0,0,0,0}, // 46\n {1,2,3,0,0,0}, // 47\n {24,27,28,29,0,0}, // 48\n {22,27,28,29,0,0}, // 49\n {24,11,12,13,0,0}, // 50\n {27,0,0,0,0,0}, // 51\n {24,11,12,13,0,0}, // 52\n {30,24,22,23,21,0}, // 53\n {0,0,0,0,0,0}, // 54\n {30,31,23,21,0,0}, // 55\n {22,24,1,2,3,0}, // 
56\n\t\t{0,0,0,0,0,0}, // growto 57;\n\t\t{24,11,12,13,0,0}, // color 58;\n\t\t{24,27,28,29,0,0}, // center 59;\n\t\t{30,31,0,0,0,0}, // opacity 60;\n\t\t{30,31,0,0,0,0}, // lighting 61;\n\t\t{30,31,0,0,0,0}, // allow 62;\n\t\t{30,31,0,0,0,0}}; // forbid 63;\n\n/****************************************/\n\nchar menutext[NSYMS][4] = {\n {'B','a','r','s'},\n {'D','i','r','n'},\n {'P','i','n','s'},\n {'F','a','c','e'},\n {'L','i','m','b'},\n {'V','o','l','m'},\n {'A','r','e','a'},\n {'R','o','t','n'},\n {'K','e','y','s'},\n {'M','i','s','c'},\n {'W','a','y','s'},\n {'Z','Z','Z','Z'},\n {'S','t','a','v'} };\n\nint leg[12][3] /* quaternion angles of 11 direction symbols */\n /* for walking */\n = {{ 0, 0, 0},\n { 0, 0, 30},\n { 0,315, 30},\n { 0,270, 30},\n { 0,225, 30},\n { 0,180, 30},\n { 0,180, 30},\n { 0,135, 30},\n { 0, 90, 30},\n { 0, 45, 30},\n { 0, 0, 30},\n { 0, 0, 0}};\n\nint opp[12] /* opposite direction to a movement */\n /* for the leg that is left behind */\n = { 0, 5, 7, 8, 9, 1, 1, 2, 3, 4, 5, 0};\n\nint stt[3][12][3] /* quaternion angles of 11 direction symbols */\n /* for straight limbs */\n /* at hip or shoulder */\n = {{{ 0, 0, 0}, // null\n\t\t\t { 0, 0, 35}, // 1 R forward low\n\t\t\t { 0,315, 35}, // 2 R diagonally forward low\n\t\t\t { 68, 90,-35}, // 3 R side low\n\t\t\t { 0,225, 35}, // 4 R diagonally back low\n\t\t\t { 0,180, 35}, // 5 R back low\n\t\t\t { 0,180, 35}, // 6 L back low\n\t\t\t { 0,135, 35}, // 7 L diagonally back low\n\t\t\t { 68, 90, 35}, // 8 L side low\n\t\t\t { 0, 45, 35}, // 9 L diagonally forward low\n\t\t\t { 0, 0, 35}, // 10 L forward low\n\t\t\t { 0, 0, 0}},// 11 in place low\n\t\t\t{{ 0, 0, 0}, // null\n\t\t\t { 0, 0, 90}, // 1 R forward middle\n\t\t\t {337,339, 98}, // 2 R diag forward middle\n\t\t\t {315,270, 90}, // 3 R side middle\n\t\t\t {158,339, 98},\n\t\t\t {180, 0, 90},\n\t\t\t {180, 0, 90},\n\t\t\t {202, 21, 98},\n\t\t\t {114, 90, 90},\n\t\t\t { 22, 21, 98},\n\t\t\t { 0, 0, 90},\n\t\t\t { 0, 0, 0}},\n\t\t\t{{ 0, 0, 0}, // null\n\t\t\t { 0, 0,135}, // R forward high\n\t\t\t { 0,315,135},\n\t\t\t { 0,270,135},\n\t\t\t { 0,225,135},\n\t\t\t { 0,180,135},\n\t\t\t { 0,180,135},\n\t\t\t { 0,135,135},\n\t\t\t { 0, 90,135},\n\t\t\t { 0, 45,135},\n\t\t\t { 0, 0,135},\n\t\t\t { 0, 0,180}}};\nint trlx[3][12][3] /* quaternion angles of 11 direction symbols */\n /* for relaxed thighs */\n = {{{ 0, 0, 0}, // null\n\t\t\t { 0, 0, 55}, // R low forward\n\t\t\t {321,342, 70},\n\t\t\t {180,270, 44}, // R low side\n\t\t\t {257,310, 60},\n\t\t\t {180, 0, 30}, // R low back\n\t\t\t {180, 0, 30}, // L low back\n\t\t\t { 99, 46, 58},\n\t\t\t { 87, 90, 44}, // L low side\n\t\t\t { 42, 17, 66},\n\t\t\t { 0, 0, 50},\n\t\t\t { 0, 0, 0}},\n\t\t\t{{ 0, 0, 0},\n\t\t\t { 0, 0,110},\n\t\t\t {341,339,107},\n\t\t\t { 45,277, 90},\n\t\t\t {225,294, 95},\n\t\t\t {180, 0, 80},\n\t\t\t {180, 0, 80},\n\t\t\t {135, 66, 95},\n\t\t\t {315, 83, 90},\n\t\t\t { 19, 21,107},\n\t\t\t { 0, 0,100},\n\t\t\t { 0, 0, 0}},\n\t\t\t{{ 0, 0, 0},\n\t\t\t { 0, 0,145},\n\t\t\t {352,338,148},\n\t\t\t { 68,275,135},\n\t\t\t {192,291,135},\n\t\t\t {180, 0,125},\n\t\t\t {180, 0,125},\n\t\t\t {167, 69,134},\n\t\t\t {292, 85,135},\n\t\t\t { 7, 22,148},\n\t\t\t { 0, 0,145},\n\t\t\t { 0, 0,180}}};\nint arlx[3][12][3] /* quaternion angles of 11 direction symbols */\n = {{{ 0, 0, 0}, // null\n { 0, 0, 35}, // R low forward\n {307, 346, 56},\n {287, 335, 102}, // R low side\n {141, 342, 70},\n {180, 0, 50},\n {180, 0, 50},\n {222, 17, 66},\n { 72, 25, 102}, // L low side\n { 57, 12, 54}, // L low diag 
forward\n { 0, 0, 35}, // L low forward\n { 0, 0, 0}}, // L low centre\n {{ 0, 0, 0}, // null\n { 0, 0, 67},\n {328, 341, 79},\n {303, 331, 108}, // R middle side\n {285, 328, 143},\n {270, 327, 179}, // R middle back\n { 90, 33, 179}, // L middle back\n { 75, 33, 143},\n { 57, 29, 108}, // L middle side\n { 32, 19, 79},\n { 0, 0, 67}, // L middle front\n {315, 60, 98}},// L middle centre\n {{ 0, 0, 0},\n { 0, 0,145},\n {348,338,129},\n {247,275,135},\n {209,298,142},\n { 90, 72,180},\n { 90, 72,180},\n {150, 62,143},\n {112, 85,135},\n { 12, 22,129},\n { 0, 0,125},\n {350,-45,166}}};\n\nint abnt[3][12][3] /* quaternion angles of 11 direction symbols */\n /* for 90 degree bent arms */\n = {{{252 , 17, 95},\n { 0, 0, 0},\n {270, 0, 45},\n {270, 0, 90}, // R low side\n {270, 0, 135},\n {270, 0, 180}, // R low back \n { 90, 0, 180}, // L low back\n { 90, 0, 135},\n { 90, 0, 90},\n { 90, 0, 45},\n { 0, 0, 0}, // L low forward\n { 0, 0, 0}},\n {{ 0, 0, 0},\n { 45, 300, 98},\n { 67, 21, 98}, // R middle side\n {225, 300, 98},\n { 0, 225, 135},\n { 0, 180, 135},\n { 0, 180, 135},\n { 0, 135, 135},\n {135, 60, 98},\n {292, 339, 98}, // L middle side\n {315, 60, 98},\n { 0, 0, 0}},\n {{ 0, 0, 0},\n { 0, 315, 135},\n { 0, 270, 135},\n { 45, 225, 135},\n { 90, 225, 135},\n {135, 225, 135},\n {135, 225, 135},\n { 90, 45, 135},\n { 45, 225, 225},\n { 0, 90, 135},\n { 0, 45, 135},\n { 90, 67, 180}}};\n\n/************************************************/\n\nint main(int argc, char* argv[]);\n\nvoid lgetout(int allok)\n/*\n close files and wait\n\n\tcalled by linter, lcopyfigs, lcopysubs, ldopivot, lchange,\n\t lfindstaff, lfindystart, lleggesture, lselectfig,\n\t loverlap,\n*/\n{\n if (allok == 0) \n {\n printf(\"%s created OK\\n\", nudesname);\n if (infile) fclose(infile);\n if (nudesfile) fclose(nudesfile);\n if (figsfile) fclose(figsfile);\n }\n else\n {\n printf(\"lintel snag, line %d\\n\",j);\n printf(\"%s\\n\",lbnline[j]);\nnotok: goto notok;\n\t ok = 1;\n }\n}/* lgetout */\n/***********************************************************/\n\nvoid initialise(void)\n/*\n set up constants to default values\n\n called by main,\n*/\n{\n double a,b;\n int k,m,n;\n\n prdone = FALSE;\n nbar = -1;\n rise = 1;\n prevc = 0;\n previ = 11;\n track = TRUE;\n\tmspace = false;\n\twspace = FALSE;\n\n doub0 = double(0);\n doub1 = double(1);\n doub2 = double(2);\n doub3 = double(3);\n doub4 = double(4);\n doub10 = double(10);\n\tdoub60 = double(60);\n doub90 = double(90);\n doub150 = double(150);\n doub179 = double(179);\n doub180 = double(180);\n doub181 = double(181);\n doub255 = double(255);\n doub360 = double(360);\n doub500 = double(500);\n inv2 = doub1/doub2;\n inv3 = doub1/doub3;\n inv4 = doub1/doub4;\n inv5 = doub1/double(5);\n inv6 = doub1/double(6);\n inv10 = doub1/doub10;\n inv256 = doub1/double(256);\n inv1000 = doub1/double(1000);\n rt3 = sqrt(doub3);\n piby2 = doub2*atan(doub1);\n pi = piby2+piby2 ;\n twopi = pi+pi;\n radten = twopi/double(3600);\n radian = twopi/doub360;\n degree = doub1/radian;\n lg2 = log(doub2);\n freeze = FALSE;\n forward = TRUE;\n single = DONE;\n pause = FALSE;\n shadow = TRUE;\n fnums = TRUE;\n bnums = TRUE;\n hold = NO;\n prevhold = -99;\n prev_time = -1;\n fstart = 0;\n fstop = 0;\n pstart = 0;\n pend = 0;\n f_max = 0;\n vstart = 0;\n vstop = FMAX;\n inmain = TRUE;\n start = -1;\n lline = 0;\n fast = 1;\n slow = 1;\n fslow = 1;\n njts = 0;\n nvars = 0;\n nfiles = 0;\n nvals = 0;\n axlen[0] = 1; axlen[1] = 1; axlen[2] = 1;\n for ( j = 0 ; j < EMAX ; ++ j )\n {\n if ( j > 2) 
axlen[j] = -1;\n keylen[j] = 0;\n ellen[j] = -1;\n jntlen[j] = -1;\n fillen[j] = -1;\n figlen[j] = -1;\n sublen[j] = -1;\n varlen[j] = -1;\n knee[j] = 0 ;\n figell[j] = 0;\n ellfig[j] = 0;\n usevar[j] = 0;\n coel[j][0] = -1;\n coel[j][1] = -1;\n subact[j][0] = 0;\n subact[j][1] = 0;\n called[j] = FALSE;\n defined[j] = FALSE;\n val[j] = doub0 ;\n\n for ( k = 0 ; k < 3 ; ++ k )\n {\n cen[j][k] = doub3;\n ax[j][k] = doub2;\n lim[j][k][0] = -doub360;\n lim[j][k][1] = doub360;\n obs[k][0] = doub0;\n obs[k][1] = doub0;\n obs[k][2] = doub0;\n obs[k][k] = doub1;\n }\n col[j][0] = doub255;\n col[j][1] = doub150;\n col[j][2] = doub90;\n quat[j][0] = doub1;\n quat[j][1] = doub0;\n quat[j][2] = doub0;\n quat[j][3] = doub0;\n quat[j][4] = doub1;\n for ( k = 0 ; k < BMAX ; ++ k )\n {\n axnam[j][k] = NULL;\n tname[j][k] = NULL;\n fname[j][k] = NULL;\n ename[j][k] = NULL;\n jname[j][k] = NULL;\n vname[j][k] = NULL;\n sname[j][k] = NULL;\n }\n }\n axnam[0][0] = 'x';\n axnam[1][0] = 'y';\n axnam[2][0] = 'z';\n/*\n set all actions by default to stop\n*/\n for ( j = 0 ; j < PMAX ; ++ j )\n {\n type[j] = stop_keyword_code;\n frstart[j] = 0;\n frstop[j] = 0;\n distrn[j] = 0;\n cline[j] = 0;\n\n for ( k = 0 ; k < 6 ; ++ k )\n pf[j][k] = 0;\n }\n/*\n artificially set up subroutine \"nudes\",\n file \"dummy\", figures \"every\" and \"world\",\n variable \"variable\", and ellipsoid \"world\"-\n*/\n nsubs = 1;\n nfigs = 2;\n ne = 1;\n figell[0] = 0;\n figell[1] = 1;\n\n for ( k = 0 ; k < 6 ; ++ k )\n {\n tname[0][k] = dummy[k];\n sname[0][k] = nudes[k];\n fname[0][k] = every[k];\n fname[1][k] = world[k];\n ename[0][k] = world[k];\n vname[0][k] = variab[k];\n }\n fillen[0] = 5;\n sublen[0] = 5;\n figlen[0] = 5;\n figlen[1] = 5;\n ellen[0] = 5;\n varlen[0] = 6;\n ax[0][0] = doub1;\n ax[0][1] = doub1;\n ax[0][2] = doub1;\n df = 1;\n f = 0;\n nsph = SSTART;\n anglex = doub0; angley = doub0; anglez = doub0;\n tx = doub0; ty = doub0; tz = doub0;\n scale = doub1;\n alpha = doub3;\n t = 0;\n more = 1;\n ok = 0;\n\n// find bits in double mantissa -\t\n\tb = doub1;\n\tm = 0;\n\tfor (a = inv2; doub1 + b > doub1 + a; a *= inv2)\n\t{\n\t\tb = a;\n\t\t++ m;\n\t}\n\ttolr = b+b;\n\tj = 2;\n\tn = 0;\n\n// find bits in integer -\n\tfor (k = 1; k < j; j += j)\n\t{\n\t\tk += k;\n\t\t++ n;\n\t}\n\tmaxint = k;\n\tprintf(\"\\n tolr %g (%d bits), maxint %d (%d bits)\\n\",\n\t\ttolr,m,maxint,n);\n} /* initialise */\n/***************************************/\n\nvoid bell ( int number, int delay)\n{\n\t int i, j;\n\t for ( i = 0; i < number; i++ )\n\t {\n printf ( \"\\a\" );\n\t\t\tfor ( j = 0; j < delay; j++ )\n\t\t\t{\n\t\t\t}\n\t }\n} /* bell */\n/***************************************/\n\nint lfindnext(int c, int y1, int y2)\n/*\n find next symbol in column 'c' in range 'y1' to 'y2'.\n\n called by ldostep,\n*/\n\n{\n int k;\n int q;\n int yy;\n\n q = -1;\n yy = y2;\n for (k = sstart; k < ssend; ++k)\n {\n if ((lbn[k].c == jc) && (lbn[k].y >= y1) && (lbn[k].y <= y2))\n {\n if (lbn[k].y < yy)\n {\n q = k;\n yy = lbn[k].y;\n }\n }\n }\n return(q);\n} /* lfindnext */\n/****************************************************/\n\nvoid lsetframes(void)\n/*\n set the frames over which an action occurs :-\n fstart, fhalf, frange, fend.\n\n called by laction,\n*/\n{\n if (nbar < 1)\n {\n fstart = 0;\n frange = 1;\n fend = 1;\n }\n else\n {\n fstart = int(inv2+lbn_fpp*double(jy-ystart));\n if (fstart < 1) fstart = 1;\n frange = int(inv2+lbn_fpp*double(jh));\n if (frange < 1) frange = 1;\n fend = fstart + frange;\n }\n fhalf = fstart + frange/2;\n if 
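/*\n worked example of the pixel-to-frame mapping above, using assumed\n score values (not from any particular file): with lbn_fpp = 0.5\n frames per pixel, jy - ystart = 21 and jh = 30, then\n fstart = int(inv2 + 0.5*21) = 11, frange = int(inv2 + 0.5*30) = 15,\n fend = 26 and fhalf = 18\n*/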
(fend <= fstart) fend = fhalf+1;\n if (fhalf > fend) fend = fhalf+1;\n if (fend > f_max) f_max = fend;\n} /* lsetframes */\n/************************************************/\n\nvoid lcolx(int lcentre)\n/*\n find column number of each symbol\n\t-5 = L arm\n\t-3 = L gesture\n -1 = L support\n\t 1 = R support\n\t 3 = R gesture\n\t 5 = R arm\n\t\n called by linter,\n*/\n{\n int k;\n int kc;\n int kwx;\n\n for (k = 0; k < nlabs; ++k)\n {\n kwx = lbn[k].x + (lbn[k].w/2);\n kc = (kwx - lcentre)/STEP;\n if (kwx < lcentre)\n --kc;\n else\n ++kc;\n lbn[k].c = kc;\n }\n} /* lcolx */\n/************************************************/\n\nvoid lbnread(void)\n/*\n read .lbn laban score file\n\n called by linter,\n*/\n{\n int j;\n int i,x,y,s,w,h;\n char d;\n char m0,m1,m2,m3;\n\n j = 0;\n xmax = 0;\n xmin = 10000;\n\tif ( ( infile = fopen( finname, \"r\" ) ) == NULL )\n\t{\n\t\tprintf(\"lbnread oops\\n\");\na: goto a;\n\t}\n while ((j < LMAX) && (fgets(buf,BMAX,infile) != NULL))\n {\n\t\tstrcpy(lbnline[j],buf);\n\t\tsscanf(buf,\"%c%c%c%c %d %d %d %d %d %d %c\",\n &m0,&m1,&m2,&m3,&i,&x,&y,&s,&w,&h,&d);\n\t\tif (m0 != '#')\n\t\t{\n lbn[j].m = m0;\n if ((m0 == 'P')&&(m1 == 'a'))\n lbn[j].m = Path;\n lbn[j].i = i;\n lbn[j].x = x;\n lbn[j].y = y;\n lbn[j].w = w;\n lbn[j].h = h;\n lbn[j].s = s;\n lbn[j].b = -1;\n lbn[j].l = j;\n lbn[j].a = TODO;\n lbn[j].x2 = x+w;\n lbn[j].y2 = y+h;\n lbn[j].d = BLANK;\n if (d =='M') lbn[j].d = MED;\n if (d =='L') lbn[j].d = LOW;\n if (d =='H') lbn[j].d = HIGH;\n if (x < xmin) xmin = x;\n if (x+w > xmax) xmax = x+w;\n if (j >= LMAX)\n {\n printf(\"\\nBEWARE: score truncated at line %d\\n\",j);\n printf(\"more than %d laban score items\\n\",LMAX);\n }\n ++j;\n\t\t}\n } /* while reading next line */\n nlabs = j;\n\tprintf(\"\\n lbnread: %d lbn symbols\\n\",nlabs);\n} /* lbnread */\n/************************************************/\n\nvoid lassign(void)\n/*\n assign global variables\n\n called by laction, lsorty, lbent,\n*/\n{\n ja = lbn[j].a;\n jb = lbn[j].b;\n jc = lbn[j].c;\n jd = lbn[j].d;\n jh = lbn[j].h;\n ji = lbn[j].i;\n jl = lbn[j].l;\n jm = lbn[j].m;\n js = lbn[j].s;\n jw = lbn[j].w;\n jx = lbn[j].x;\n jy = lbn[j].y;\n jx2 = lbn[j].x2;\n jy2 = lbn[j].y2;\n} /* lassign */\n/**********************************************/\n\nvoid lsorty(void)\n/*\n sort score symbols into ascending order of 'y'\n (bubble sort)\n find maxy, and fill yj table\n\n called by linter,\n calls lassign,\n*/\n{\n int k;\n int last;\n int y;\n\n for (j = 0; j < (nlabs-1); ++j)\n {\n for (k = j; k < nlabs; ++k)\n {\n if (lbn[k].y < lbn[j].y)\n {\n lassign();\n lbn[j].a = lbn[k].a;\n lbn[j].b = lbn[k].b;\n lbn[j].c = lbn[k].c;\n lbn[j].d = lbn[k].d;\n lbn[j].h = lbn[k].h;\n lbn[j].i = lbn[k].i;\n lbn[j].l = lbn[k].l;\n lbn[j].m = lbn[k].m;\n lbn[j].s = lbn[k].s;\n lbn[j].w = lbn[k].w;\n lbn[j].x = lbn[k].x;\n lbn[j].y = lbn[k].y;\n lbn[j].x2 = lbn[k].x2;\n lbn[j].y2 = lbn[k].y2;\n lbn[k].a = ja;\n lbn[k].b = jb;\n lbn[k].c = jc;\n lbn[k].d = jd;\n lbn[k].h = jh;\n lbn[k].i = ji;\n lbn[k].l = jl;\n lbn[k].m = jm;\n lbn[k].s = js;\n lbn[k].w = jw;\n lbn[k].x = jx;\n lbn[k].y = jy;\n lbn[k].x2 = jx2;\n lbn[k].y2 = jy2;\n strcpy(buf,lbnline[j]);\n strcpy(lbnline[j],lbnline[k]);\n strcpy(lbnline[k],buf);\n }\n }\n }\n ymax = 0;\n for (j = 0; j < nlabs; ++j)\n if (((lbn[j].y2) > ymax)&&(lbn[j].m != Stav))\n ymax = lbn[j].y2+1;\n for (y = 0; y < ymax; ++y)\n yj[y] = -1;\n for (j = 0; j < nlabs; ++j)\n {\n y = lbn[j].y;\n\t if (y < 0) y = 0;\n if (yj[y] < 0) yj[y] = j;\n }\n last = 0;\n for (y = 0; y < 
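/*\n the yj[] table filled below maps a pixel row y to the index of the\n first symbol at that row, rows without a symbol inheriting the last\n index seen, so later passes can start scanning at yj[y] instead of\n searching all nlabs symbols; the field-by-field exchange above could\n also be sketched as a whole-struct swap, assuming the element type\n is a plain struct with a hypothetical name lbnsym:\n struct lbnsym t = lbn[j]; lbn[j] = lbn[k]; lbn[k] = t;\n*/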
ymax; ++y)\n {\n if (yj[y] < 0)\n yj[y] = last;\n else\n last = yj[y];\n }\n} /* lsorty */\n/************************************************/\n\nvoid lsortx(int stff[LMAX][2], int nstff)\n/*\n sort staff symbols into ascending order of 'x'\n (bubble sort)\n\n called by lfindstaff,\n*/\n{\n int j;\n int k;\n int s0,s1;\n\n for (j = 0; j < (nstff-1); ++j)\n {\n for (k = j; k < nstff; ++k)\n {\n if (stff[k][1] < stff[j][1])\n {\n s0 = stff[j][0];\n s1 = stff[j][1];\n stff[j][0] = stff[k][0];\n stff[j][1] = stff[k][1];\n stff[k][0] = s0;\n stff[k][1] = s1;\n }\n }\n }\n} /* lsortx */\n/************************************************/\n\nint loverlap(int p1j, int p2j, int p1k, int p2k)\n/*\n check how much symbols j and k overlap in dimension p\n\n called by lbent, lleggesture, lhastap, lhasgesture,\n lseeksym, ldopivot,\n\t\t\t \n calls lgetout,\n*/\n{\n int p1max,p2min;\n int lap;\n\n if ((p1j > p2j)||(p1k > p2k))\n {\n\t printf(\"OOPS: loverlap %d %d %d %d\\n\",\n\t\t p1j,p2j,p1k,p2k);\n\t lgetout(j);\n }\n lap = FALSE;\n if (p1k < p1j)\n p1max = p1j;\n else\n p1max = p1k; \n if (p2k < p2j)\n p2min = p2k;\n else\n p2min = p2j;\n lap = p2min - p1max;\n return(lap);\n} /* loverlap */\n/********************************************/\n\nvoid lfindstaff(void)\n/*\n find the centres of the staves\n\n called by linter,\n\tcalls lsortx, lgetout,\n*/\n{\n int j,jp,jq;\n int k,kp,kq;\n int staffstart;\n int nstaffstart;\n int nstff;\n int stff[TMAX][2];\n\n k = 0;\n staffstart = 0;\n for (j = 0; j < nlabs; ++j)\n {\n if (lbn[j].m == Stav)\n {\n stff[k][0] = j;\n stff[k][1] = lbn[j].x;\n if (lbn[j].y > staffstart)\n staffstart = lbn[j].y;\n nstaffstart = j;\n ++k;\n lbn[j].a = DONE;\n }\n }\n if (k < 3)\n {\n printf(\"lfindstaff: only %d staff lines\\n\",k);\n lgetout(1);\n if (ok == 1) goto rtrn;\n }\n if (k > TMAX)\n {\n printf(\"lfindstaff: %d staff lines, max %d\\n\",k,TMAX);\n lgetout(1);\n if (ok == 1) goto rtrn;\n } \n nstff = k;\n lsortx(stff,nstff);\n k = 0;\n for (j = 1; j < nstff; j += 3)\n {\n staff[k][0] = stff[j][0];\n staff[k][1] = stff[j-1][1];\n staff[k][2] = stff[j][1];\n staff[k][3] = stff[j+1][1];\n staff[k][4] = -1;\n staff[k][5] = TODO;\n ++k;\n }\n nstaff = k;\n stmiddle = (staff[0][2] + staff[nstaff-1][2])/2;\n npins = 0;\n // seek pins under center stafflines\n for (j = 0; j < nstaffstart; ++j)\n {\n if (lbn[j].m == Pins)\n {\n jp = lbn[j].x;\n jq = lbn[j].x2;\n pins[npins][0] = j;\n pins[npins][1] = -1;\n for (k = 0; k < nstaff; ++k)\n {\n kp = staff[k][2] - 1;\n kq = kp+2;\n if (loverlap(jp,jq,kp,kq) > 0)\n {\n if (lbn[j].d == 0)\n {\n staff[k][4] = MAN;\n pins[npins][1] = k;\n\t\t\t\t lbn[j].a = DONE;\n }\n else\n {\n staff[k][4] = WOMAN;\n pins[npins][1] = k;\n\t\t\t\t lbn[j].a = DONE;\n } /* empty pin */\n } /* pin under central staff */\n } /* k : staff lines */\n ++npins;\n } /* a pin found */\n } /* j */\n if (nstaff < 1)\n\t printf(\"No staves found\\n\");\n else\n for (j = 0; j < nstaff; ++j)\n {\n\t\tif (j == 0)\n\t\t\tprintf(\"\\n\");\n printf(\"staff %d: \",j+1);\n if (staff[j][4] == MAN)\n printf(\" man\\n\");\n else\n if (staff[j][4] == WOMAN)\n printf(\" woman\\n\");\n else\n printf(\" no gender\\n\");\n }\nrtrn: ;\n} /* lfindstaff */\n/***************************************************/\n\nvoid lfindystart(void)\n/*\n find y position of first double bar line\n\n called by linter,\n*/\n{\n int j;\n\n ystart = -1;\n/*\n seek initial double bar line -\n*/\n for (j = 0; ((j < nlabs)&&(ystart < 0)); ++j)\n {\n if ((lbn[j].m == Bars) && (lbn[j].d == LOW))\n\t 
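/*\n the search runs in three tiers: first a LOW (double) bar line,\n then any bar line, then any direction symbol in a support column\n (c == 1 or c == -1); the result is then backed up by 3 pixels\n*/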
ystart = lbn[j].y + 1;\n }\n/*\n if none, seek any bar line -\n*/\n if (ystart < 0)\n {\n for (j = 0; ((j < nlabs)&&(ystart < 0)); ++j)\n if (lbn[j].m == Bars) ystart = lbn[j].y + 1;\n }\n/*\n if none, seek any supporting direction symbol -\n*/\n if (ystart < 0)\n {\n for (j = 0; ((j < nlabs)&&(ystart < 0)); ++j)\n if ((lbn[j].m == Dirn) &&\n\t ((lbn[j].c == 1) || (lbn[j].c == -1)) )\n\t ystart = lbn[j].y;\n }\n ystart -= 3;\n if (ystart < 0)\n {\n printf(\n \"linter : findystart finds no direction support symbols\\n\");\n lgetout(1);\n ystart = 0;\n }\n} /* lfindystart */\n/**************************************************/\n\nvoid lchange(char d)\n/*\n change bend in ankles,legs, and hips while stepping\n\n called by ldostep,\n*/\n{\n if (d == 'L')\n {\n fprintf(nudesfile,\n \"repeat %3d %3d set tcls1 tlow1\\n\",fstart,fend);\n fprintf(nudesfile,\n \"repeat %3d %3d set tcls2 tlow2\\n\",fstart,fend);\n fprintf(nudesfile,\n \"repeat %3d %3d set tcls3 tlow3\\n\",fstart,fend);\n fprintf(nudesfile,\n \"repeat %3d %3d set lcls1 llow1\\n\",fstart,fend);\n fprintf(nudesfile,\n \"repeat %3d %3d set lcls2 llow2\\n\",fstart,fend);\n fprintf(nudesfile,\n \"repeat %3d %3d set lcls3 llow3\\n\",fstart,fend);\n fprintf(nudesfile,\n \"repeat %3d %3d set fcls1 flow1\\n\",fstart,fend);\n fprintf(nudesfile,\n \"repeat %3d %3d set fcls2 flow2\\n\",fstart,fend);\n fprintf(nudesfile,\n \"repeat %3d %3d set fcls3 flow3\\n\",fstart,fend);\n }\n else if (d == 'M')\n {\n fprintf(nudesfile,\n \"repeat %3d %3d set tcls1 trlx1\\n\",fstart,fend);\n fprintf(nudesfile,\n \"repeat %3d %3d set tcls2 trlx2\\n\",fstart,fend);\n fprintf(nudesfile,\n \"repeat %3d %3d set tcls3 trlx3\\n\",fstart,fend);\n fprintf(nudesfile,\n \"repeat %3d %3d set lcls1 lrlx1\\n\",fstart,fend);\n fprintf(nudesfile,\n \"repeat %3d %3d set lcls2 lrlx2\\n\",fstart,fend);\n fprintf(nudesfile,\n \"repeat %3d %3d set lcls3 lrlx3\\n\",fstart,fend);\n fprintf(nudesfile,\n \"repeat %3d %3d set fcls1 frlx1\\n\",fstart,fend);\n fprintf(nudesfile,\n \"repeat %3d %3d set fcls2 frlx2\\n\",fstart,fend);\n fprintf(nudesfile,\n \"repeat %3d %3d set fcls3 frlx3\\n\",fstart,fend);\n }\n else if (d == 'H')\n {\n if ((ji != 1)&&(ji != 10))\n {\n fprintf(nudesfile,\n \"repeat %3d %3d set tcls1 thig1\\n\",fstart,fend);\n fprintf(nudesfile,\n \"repeat %3d %3d set tcls2 thig2\\n\",fstart,fend);\n fprintf(nudesfile,\n \"repeat %3d %3d set tcls3 thig3\\n\",fstart,fend);\n fprintf(nudesfile,\n \"repeat %3d %3d set lcls1 lhig1\\n\",fstart,fend);\n fprintf(nudesfile,\n \"repeat %3d %3d set lcls2 lhig2\\n\",fstart,fend);\n fprintf(nudesfile,\n \"repeat %3d %3d set lcls3 lhig3\\n\",fstart,fend);\n fprintf(nudesfile,\n \"repeat %3d %3d set fcls1 fmed1\\n\",fstart,fend);\n fprintf(nudesfile,\n \"repeat %3d %3d set fcls2 fmed2\\n\",fstart,fend);\n fprintf(nudesfile,\n \"repeat %3d %3d set fcls3 fmed3\\n\",fstart,fend);\n }\n else\n if ((ji != 5)&&(ji != 6))\n {\n fprintf(nudesfile,\n \"repeat %3d %3d set tcls1 trlx1\\n\",fstart,fend);\n fprintf(nudesfile,\n \"repeat %3d %3d set tcls2 trlx2\\n\",fstart,fend);\n fprintf(nudesfile,\n \"repeat %3d %3d set tcls3 trlx3\\n\",fstart,fend);\n fprintf(nudesfile,\n \"repeat %3d %3d set lcls1 lrlx1\\n\",fstart,fend);\n fprintf(nudesfile,\n \"repeat %3d %3d set lcls2 lrlx2\\n\",fstart,fend);\n fprintf(nudesfile,\n \"repeat %3d %3d set lcls3 lrlx3\\n\",fstart,fend);\n fprintf(nudesfile,\n \"repeat %3d %3d set fcls1 fhig1\\n\",fstart,fend);\n fprintf(nudesfile,\n \"repeat %3d %3d set fcls2 fhig2\\n\",fstart,fend);\n fprintf(nudesfile,\n 
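/*\n each branch of lchange writes nine 'set' lines retargeting the\n thigh/leg/foot bend variables (tcls*, lcls*, fcls*) to the presets\n for the requested level; e.g. with assumed frames fstart = 12 and\n fend = 26 the first line of the 'L' branch reads:\n repeat  12  26 set tcls1 tlow1\n*/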
\"repeat %3d %3d set fcls3 fhig3\\n\",fstart,fend);\n }\n }\n else\n {\n printf(\"linter: funny depth parameter, frame %d\\n\",\n\t\t fstart);\n lgetout(1);\n }\n} /* lchange */\n/*********************************************/\n\nvoid lsetcoords(void)\n/*\n set the coordinate system for a step\n\t\n\tcalled by ldostep, lleggesture\n\t*/\n{\n\tif ( dofig == MAN )\n\t{\n\t\tif (mspace == false)\n\t\t\tfprintf( nudesfile, \"repeat %3d %3d set coords mpelvis\\n\",\n\t\t\t\tfstart, fend);\n\t\telse\n\t\t\tfprintf( nudesfile, \"repeat %3d %3d set coords mspace\\n\",\n\t\t\t\tfstart, fend);\n\t}\n\telse\n\t{\n\t\tif (wspace == FALSE)\n\t\t\tfprintf( nudesfile, \"repeat %3d %3d set coords wpelvis\\n\",\n\t\t\t\tfstart, fend);\n\t\telse\n\t\t\tfprintf( nudesfile, \"repeat %3d %3d set coords wspace\\n\",\n\t\t\t\tfstart, fend);\n\t}\t/* dofig == WOMAN */\n} /* lsetcoords */\n/************************************************/\n\nvoid ldostep(void)\n/*\n create NUDES calls for steps on score\n\n called by laction,\n calls lsetframes, lfindnext, lsetcoords\n*/\n{\n\tint b;\n\tint havestep;\n\tint k;\n\tint n;\n\n\tb = 0;\n\tif ( ( jm == Dirn ) && ( ( jc == -1 ) || ( jc == 1 ) ) )\n\t{\n\t\thavestep = TRUE;\n\t\tk = lfindnext ( jc, jy + jh, jy + 2 * jh );\n\t\tif ( ji > 5 )\n\t\t\tn = ji - 5;\n\t\telse\n\t\t\tn = ji + 5;\n\t\tfprintf( nudesfile, \"*\\n\" );\n\t\tif ( ( jc == -1 ) &&\n\t\t\t( ( ji == 1 ) || ( ji == 5 ) || ( ji == 3 ) ) )\n\t\t{\n\t\t\tprintf( \"dostep: funny symbol in left support column, line %d, bar %d\\n\",\n\t\t\t\tj, nbar );\n\t\t\tprintf( \"%3d %3d %3d %3d %3d %3d %d\\n\",\n\t\t\t\tjm, ji, jy, js, jw, jh, jd );\n\t\t}\n\t\telse if ( ( jc == 1 ) &&\n\t\t\t( ( ji == 10 ) || ( ji == 6 ) || ( ji == 8 ) ) )\n\t\t{\n\t\t\tprintf( \"dostep: funny symbol in right support column, line %d, bar %d\\n\",\n\t\t\t\tj, nbar );\n\t\t\tprintf( \"%3d %3d %3d %3d %3d %3d %d\\n\",\n\t\t\t\tjm, ji, jy, js, jw, jh, jd );\n\t\t}\n\t\telse\n\t\t{\n\t\t\tfprintf( nudesfile, \"repeat %3d %3d set fend %d\\n\",\n\t\t\t\tfstart, fend, frange );\n\t\t\tfprintf( nudesfile, \"repeat %3d %3d call %s\\n\",\n\t\t\t\tfstart, fend, risesub[jd] );\n\t\t\tlsetcoords();\n\t\t\tif ( jc > 0 )\n\t\t\t\tfprintf( nudesfile, \"repeat %3d %3d call forright\\n\",\n\t\t\t\t\tfstart, fend );\n\t\t\telse\n\t\t\t\tfprintf( nudesfile, \"repeat %3d %3d call forleft\\n\",\n\t\t\t\t\tfstart, fend );\n\t\t\tif ( ( ji == 1 ) || ( ji == 10 ) )\n\t\t\t\tfprintf( nudesfile, \"call %3d %3d forward\\n\",\n\t\t\t\t\tfstart, fend );\n\t\t\tif ( ji == 3 )\n\t\t\t\tfprintf( nudesfile, \"call %3d %3d rside\\n\",\n\t\t\t\t\tfstart, fend );\n\t\t\tif ( ji == 8 )\n\t\t\t\tfprintf( nudesfile, \"call %3d %3d lside\\n\",\n\t\t\t\t\tfstart, fend );\n\t\t\tif ( ( ji == 5 ) || ( ji == 6 ) )\n\t\t\t\tfprintf( nudesfile, \"call %3d %3d back\\n\",\n\t\t\t\t\tfstart, fend );\n\t\t\tif ( ji == 2 )\n\t\t\t\tfprintf( nudesfile, \"call %3d %3d rfordiag\\n\",\n\t\t\t\t\tfstart, fend );\n\t\t\tif ( ji == 9 )\n\t\t\t\tfprintf( nudesfile, \"call %3d %3d lfordiag\\n\",\n\t\t\t\t\tfstart, fend );\n\t\t\tif ( ji == 4 )\n\t\t\t\tfprintf( nudesfile, \"call %3d %3d rbacdiag\\n\",\n\t\t\t\t\tfstart, fend );\n\t\t\tif ( ji == 7 )\n\t\t\t\tfprintf( nudesfile, \"call %3d %3d lbacdiag\\n\",\n\t\t\t\t\tfstart, fend );\n\t\t\tif ( ji == 11 )\n\t\t\t\tfprintf( nudesfile, \"call %3d %3d close\\n\",\n\t\t\t\t\tfstart, fend );\n\t\t\tlbn[j].a = DONE;\n\t\t\tpstart = fstart;\n\t\t\tpend = fend;\n\t\t}\n\t\trise = jd;\n\t\tprevc = jc;\n\t\tprevi = ji;\n\t} /* column OK */\n} /* ldostep 
*/\n/****************************************************/\n\nint lhastap(int j)\n/*\n check if symbol j has overlapping ground contact\n \n called by ldopivot,\n*/\n{\n int t;\n int yk;\n int k;\n int kc,ki,ky,ky2;\n char km;\n\n t = -1;\n yk = jy - jh;\n if (yk < 1) yk = 1;\n for (k = yj[yk];\n\t ((t < 0)&&(k < nlabs)&&(lbn[k].y < jy2)); ++k)\n {\n km = lbn[k].m;\n if (km == Misc)\n {\n ki = lbn[k].i;\n if ((ki >= 4) && (ki <= 9))\n {\n kc = lbn[k].c;\n if ((kc == -3)||(kc == -2)||(kc == 2)||(kc == 3))\n {\n ky = lbn[k].y;\n ky2 = ky + lbn[k].h;\n if (loverlap(jy,jy2,ky,ky2) > 0)\n {\n t = k;\n } /* overlap = TRUE*/\n } /* in leg gesture column */\n } /* tap symbol */\n } /* tap menu */\n } /* k */\n return(t);\n} /* lhastap */\n/*******************************************************/\n\nint lhasgesture(int j)\n/*\n check if symbol j has overlapping gesture\n \n called by ldopivot,\n calls loverlap,\n*/\n{\n int kc,ky,ky2;\n int k;\n int g;\n char km;\n\n g = -1;\n for (k = 0; ((g < 0)&&(k < nlabs)); ++k)\n {\n km = lbn[k].m;\n if (km == Dirn)\n {\n kc = lbn[k].c;\n if ((kc == -3)||(kc == 3))\n {\n ky = lbn[k].y;\n ky2 = lbn[k].y2;\n if (loverlap(jy,jy2,ky,ky2) > 0)\n g = k;\n }\n }\n }\n return(g);\n} /* lhasgesture */\n/*******************************************************/\n\nvoid lleggesture(void)\n/*\n do gestures of the legs\n\n called by laction,\n calls lsetframes, lgetout, lsetcoords,\n\n Volm 1 RELAX\n Volm 3 BENT\n Volm 2 STRAIGHT\n Volm 4 STRETCH\n Volm 7 hold\n*/\n{\n if ((jc == -3)||(jc == 3))\n {\n {\n if ((jd < 0) || (jd > 2))\n {\n printf(\"OOPS: dogesture height problem line %d\\n\",j);\n printf(\"%3d %3d %3d %3d %3d %3d %3d %d\\n\",\n\t jm,ji,jx,jy,js,jw,jh,jd);\n lgetout(j);\n if (ok == 1) goto rtrn;\n } /* level funny */\n fprintf(nudesfile,\"*\\n\");\n if (ji==11)\n fprintf(nudesfile,\"* close without weight\");\n else\n if ((ji==1)||(ji==10))\n fprintf(nudesfile,\"* forward gesture\");\n else\n if ((ji==2)||(ji==9))\n fprintf(nudesfile,\"* forward diagonal gesture\");\n else\n if ((ji==3)||(ji==8))\n fprintf(nudesfile,\"* sideways gesture\");\n else\n if ((ji==4)||(ji==7))\n fprintf(nudesfile,\"* back diagonal gesture\");\n else\n if ((ji==5)||(ji==6))\n fprintf(nudesfile,\"* backward gesture\");\n//\n if (jd == LOW)\n fprintf(nudesfile,\" low\\n\");\n if (jd == MED)\n fprintf(nudesfile,\" middle\\n\");\n if (jd == HIGH)\n fprintf(nudesfile,\" high\\n\");\n//\n if (jc < 0)\n {\n if ((ji <= 1)||(ji == 3)||(ji == 5)||(ji > 11))\n {\n printf(\"OOPS: dogesture direction problem line %d\\n\",j);\n printf(\"%3d %3d %3d %3d %3d %3d %3d %3d %d\\n\",\n jm,ji,jx,jy,js,jw,jh,jb,jd);\n lgetout(1);\n if (ok == 1) goto rtrn;\n } /* i wrong */\n fprintf(nudesfile,\n \"repeat %3d %3d call forleft * left = b\\n\",\n fstart,fend);\n } /* left side */\n else if (jc > 0)\n {\n if ((ji < 1)||(ji == 6)||(j == 8)||(ji == 10)||(ji > 11))\n {\n printf(\"OOPS: dogesture direction problem line %d\\n\",j);\n printf(\"%3d %3d %3d %3d %3d %3d %3d %3d %d\\n\",\n jm,ji,jx,jy,js,jw,jh,jb,jd);\n lgetout(1);\n if (ok == 1) goto rtrn;\n } /* i wrong */\n fprintf(nudesfile,\n \"repeat %3d %3d call forright * right = b\\n\",\n fstart,fend);\n } /* right side */\n//\n fprintf(nudesfile,\"repeat %3d %3d centre afoot %s\\n\",\n fstart,fend,xyz[dofig]);\n//\n if (ji == 11)\n {\n fprintf(nudesfile,\"repeat %3d %3d call %s\\n\",\n fstart,fend,risesub[rise]);\n fprintf(nudesfile,\"repeat %3d %3d set fend %d\\n\",\n fstart,fend,frange);\n\t\t\t\tlsetcoords();\n fprintf(nudesfile,\"call %3d %3d close\\n\",\n 
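/*\n timing sketch for a leg gesture: the thigh bend runs quadratically\n over the whole span (fstart..fend) while the knee and ankle bends\n below run linearly over the second half only (fhalf..fend), so the\n lower leg settles after the thigh has begun to move\n*/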
fstart,fend);\n } /* close without weight */\n else\n {\n fprintf(nudesfile,\n \"quadratic %3d %3d bendto bthigh bhip pelvis %d %d %d\\n\",\n fstart,fend,stt[jd][ji][0],stt[jd][ji][1],stt[jd][ji][2]);\n if ((jd == LOW)&&((ji == 1)||(ji == 3)||(ji == 8)||(ji == 10))||(jb == 2)||(jb == 4))\n fprintf(nudesfile,\n \"linear %3d %3d bendto bleg bknee bthigh lhig1 lhig2 lhig3\\n\",\n fhalf,fend);\n else\n fprintf(nudesfile,\n \"linear %3d %3d bendto bleg bknee bthigh lrlx1 lrlx2 lrlx3\\n\",\n fhalf,fend);\n fprintf(nudesfile,\n \"linear %3d %3d bendto bfoot bankle bleg fhig1 fhig2 fhig3\\n\",\n fhalf,fend);\n } /* doing a leg gesture */\n if ((ji != 11)&&(hold == NO)||(st < 1))\n fprintf(nudesfile,\n \"repeat %3d %3d moveto fig afoot %s\\n\",\n\t fstart,fend,xyz[dofig]);\n lbn[j].a = DONE;\n } /* no tap and pivot */\n } /* c OK */\nrtrn: ;\n} /* lleggesture */\n/***************************************************/\n\nvoid ldoarms(void)\n/*\n do movements of the arms\n\n called by ldolimb,\n calls lsetframes,\n*/\n{\n int absjc;\n\n if (jc < 0) absjc = -jc; else absjc = jc;\n if ((absjc > 3)&&(absjc < 7))\n {\n if (jm == Dirn)\n {\n fprintf(nudesfile,\"*\\n* arms\\n\");\n if ((jd < 0) || (jd > 2))\n {\n printf(\"ldoarms problem line %d, bar %d\\n\",j,nbar);\n printf(\"%c %3d %3d %3d %3d %3d %3d %d\\n\",\n\t jm,ji,jx,jy,js,jw,jh,jd);\n lgetout(1);\n if (ok == 1) goto rtrn;\n }\n if (jc < 0) // left arm\n {\n if (jb == RELAX)\n {\n\t\t\t\t\tfprintf(nudesfile,\n \"quadratic %3d %3d bendto luarm lshldr shldrs %3d %3d %3d\\n\",\n fstart,fend,arlx[jd][ji][0],arlx[jd][ji][1],arlx[jd][ji][2]);\n\t\t\t\t\tfprintf(nudesfile,\n \"quadratic %3d %3d bendto llarm lelbow luarm %3d %3d %3d\\n\",\n fstart,fend,0,0,45);\n if (dofig == MAN)\n\t\t\t\t\t{ \n\t\t\t\t\t\tfprintf(nudesfile,\n \"quadratic %3d %3d bendto lhand lwrist llarm %3d %3d %3d\\n\",\n fstart,fend,0,0,0);\n\t\t\t\t\t} /* man */\n else\n {\n\t\t\t\t\t\tfprintf(nudesfile,\n \"quadratic %3d %3d bendto lhand lwrist llarm %3d %3d %3d\\n\",\n fstart,fend,270,0,150);\n\t\t\t\t\t} /* woman */\n }\n else\n if (jb == BENT)\n {\n if (ji == 11)\n {\n fprintf(nudesfile,\n \"quadratic %3d %3d bendto luarm lshldr shldrs %3d %3d %3d\\n\",\n fstart,fend,abnt[jd][0][0],abnt[jd][0][1],abnt[jd][0][2]);\n fprintf(nudesfile,\n \"quadratic %3d %3d bendto llarm lelbow luarm %3d %3d %3d\\n\",\n fstart,fend,0,0,70);\n }\n else\n {\n fprintf(nudesfile,\n \"quadratic %3d %3d bendto luarm lshldr shldrs %3d %3d %3d\\n\",\n fstart,fend,abnt[jd][ji][0],abnt[jd][ji][1],abnt[jd][ji][2]);\n fprintf(nudesfile,\n \"quadratic %3d %3d bendto llarm lelbow luarm %3d %3d %3d\\n\",\n fstart,fend,0,0,90);\n } /* ji != 11 */\n }\n else\n {\n fprintf(nudesfile,\n \"quadratic %3d %3d bendto llarm lelbow luarm %3d %3d %3d\\n\",\n fstart,fend,0,0,0);\n fprintf(nudesfile,\n \"quadratic %3d %3d bendto luarm lshldr shldrs %3d %3d %3d\\n\",\n fstart,fend,stt[jd][ji][0],stt[jd][ji][1],stt[jd][ji][2]);\n }\n }\n else // if (jc > 0) = right arm\n {\n if (jb == RELAX)\n {\n\t\t\t\t\tfprintf(nudesfile,\n \"quadratic %3d %3d bendto ruarm rshldr shldrs %3d %3d %3d\\n\",\n fstart,fend,arlx[jd][ji][0],arlx[jd][ji][1],arlx[jd][ji][2]);\n\t\t\t\t\tfprintf(nudesfile,\n \"quadratic %3d %3d bendto rlarm relbow ruarm %3d %3d %3d\\n\",\n fstart,fend,0,0,45);\n if (dofig == MAN)\n\t\t\t\t\t{ \n\t\t\t\t\t\tfprintf(nudesfile,\n \"quadratic %3d %3d bendto rhand rwrist rlarm %3d %3d %3d\\n\",\n fstart,fend,0,0,0);\n\t\t\t\t\t} /* man */\n else\n {\n\t\t\t\t\t\tfprintf(nudesfile,\n \"quadratic %3d %3d bendto rhand rwrist 
rlarm %3d %3d %3d\\n\",\n fstart,fend,270,0,150);\n\t\t\t } /* woman */\n\n } /* relaxed */\n else\n if (jb == BENT)\n {\t\t \n fprintf(nudesfile,\n \"quadratic %3d %3d bendto ruarm rshldr shldrs %3d %3d %3d\\n\",\n fstart,fend,abnt[jd][ji][0],abnt[jd][ji][1],abnt[jd][ji][2]);\n if (ji == 11)\n fprintf(nudesfile,\n \"quadratic %3d %3d bendto rlarm relbow ruarm %3d %3d %3d\\n\",\n fstart,fend,0,0,70);\n else\n fprintf(nudesfile,\n \"quadratic %3d %3d bendto rlarm relbow ruarm %3d %3d %3d\\n\",\n fstart,fend,0,0,90);\n } /* bent */\n else\n {\n fprintf(nudesfile,\n \"quadratic %3d %3d bendto rlarm relbow ruarm %3d %3d %3d\\n\",\n fstart,fend,0,0,0);\n fprintf(nudesfile,\n \"quadratic %3d %3d bendto ruarm rshldr shldrs %3d %3d %3d\\n\",\n fstart,fend,stt[jd][ji][0],stt[jd][ji][1],stt[jd][ji][2]);\n fprintf(nudesfile,\n \"quadratic %3d %3d bendto rhand rwrist rlarm %3d %3d %3d\\n\",\n fstart,fend,0,0,0);\n } /* not bent or relaxed */\n } /* right arm */\n } /* Dirn symbol */\n } /* in arm column */\n lbn[j].a = DONE;\nrtrn: ;\n} /* ldoarms */\n/***************************************************/\n\nvoid lspotturn(int j, int piv, int fstart, int fend, int g)\n/*\n maintain straight non-standing foot with ground \n contact during turn.\n\n called by ldopivot,\n*/\n{\n int gc,gi;\n\n gi = lbn[g].i;\n gc = lbn[g].c;\n fprintf(nudesfile,\"*\\n* spot turn-\\n\");\n if (gc < 0)\n fprintf(nudesfile,\n \"repeat %3d %3d call forleft * left = b\\n\",fstart,fend);\n else\n fprintf(nudesfile,\n \"repeat %3d %3d call forright * right = b\\n\",fstart,fend);\n fprintf(nudesfile,\n \"repeat %3d %3d centre afoot %s\\n\",\n fstart,fend,xyz[dofig]);\n fprintf(nudesfile,\n \"linear %3d %3d spinby fig afoot pelvis %d y\\n\",\n fstart,fend,piv);\n fprintf(nudesfile,\n \"linear %3d %3d bendto bthigh bhip pelvis %d %d %d\\n\",\n\t fstart,fend,stt[0][ji][0],stt[0][ji][1],stt[0][ji][2]);\n fprintf(nudesfile,\n \"repeat %3d %3d ground fig\\n\",fstart,fend);\n if ((hold == NO)||(st < 1))\n fprintf(nudesfile,\n \"repeat %3d %3d moveto fig afoot %s\\n\",\n fstart,fend,xyz[dofig]);\n fprintf(nudesfile,\n \"linear %3d %3d bendto bfoot bankle bleg fhig1 fhig2 fhig3\\n\",\n\t fstart,fhalf);\n fprintf(nudesfile,\n \"repeat %3d %3d drag bfoot bfoot bankle bleg x\\n\",\n fhalf,fend);\n lbn[j].a = DONE;\n lbn[t].a = DONE;\n} /* lspotturn */\n/******************************************************/\n\nint lgetpin(void)\n/*\n seek a pin in a rotation sign\n\t\n\tcalled by ldolimb, ldopivot,\n\tcalls loverlap,\n*/\n{\n int k,ki;\n int piv;\n int ymost;\n int xlap,ylap;\n\n ki = -123;\n ymost = -1;\n for (k = yj[jy-jh]; lbn[k].y < jy2; ++k)\n {\n if (lbn[k].m == Pins)\n {\n xlap = loverlap(jx,jx2,lbn[k].x,lbn[k].x2);\n ylap = loverlap(jy,jy2,lbn[k].y,lbn[k].y2);\n if ((xlap > 0) && (ylap > ymost))\n {\n ki = lbn[k].i;\n ymost = ylap;\n } /* pin overlaps more than previous pins */\n } /* got a pin */\n } /* k loop looking for overlapping pin */\n\t piv = 0;\n if ((ki > 0)&&(ki <= 9))\n {\n if (ji == 1) piv = -45*(9-ki);\n if (ji == 2) piv = 45*(ki-1);\n if (ki == 1) piv = 360;\n }\n return(piv);\n} /* lgetpin */\n/***************************************************/\n\nvoid ldopivot(void)\n/*\n do turns in the support columns\n\n called by laction,\n calls lsetframes, lspotturn, lhasgesture, \n lhastap, lgetpin,\n*/\n{\n int g,t;\n int piv;\n\n if ( (jm == Rotn)&&(nbar > 0)&&\n\t ((jc == -2)||(jc == -1)||(jc == 1)||(jc == 2)) )\n {\n piv = lgetpin();\n if (fstart < 1) fstart = 1;\n g = lhasgesture(j);\n t = lhastap(j);\n if ((g > 
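/*\n worked example of lgetpin's pin-to-degrees mapping above, using\n assumed symbol values: a right turn (ji == 2) with pin item ki == 3\n gives piv = 45*(ki-1) = 90 degrees; a left turn (ji == 1) with\n ki == 3 gives piv = -45*(9-ki) = -270; ki == 1 forces a full 360\n*/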
0)&&(t > 0))\n {\n lspotturn(j,piv,fstart,fend,g);\n pstart = fstart;\n pend = fend;\n }\n else\n {\n fprintf(nudesfile,\"*\\n* pivot\\n\");\n if (jc < 0)\n fprintf(nudesfile,\n \"repeat %3d %3d call forleft * b = left\\n\",fstart,fend);\n else\n fprintf(nudesfile,\n \"repeat %3d %3d call forright * b = right\\n\",fstart,fend); \t\n fprintf(nudesfile,\n \"repeat %3d %3d centre bfoot %s\\n\",\n fstart,fstart+1,xyz[dofig]);\n fprintf(nudesfile,\n \"linear %3d %3d spinby fig bfoot pelvis %d y\\n\",\n fstart,fend,piv);\n if ((hold == NO)||(st < 1))\n\t\t\t fprintf(nudesfile,\n \"repeat %3d %3d moveto fig bfoot %s\\n\",\n fstart,fend,xyz[dofig]);\n if (hold == PR)\n hold = NO;\n pstart = fend;\n pend = fend+1;\n } /* spotturn == false */\n }\n} /* ldopivot */\n/**************************************************/\n\nint lseeksym(char m, int i, int x1, int x2, int y3, int y4)\n/*\n seek a symbol of menu m, item i,\n\t overlapping box x1,x2,y1,y2.\n\n\t called by lbows, lsethold,\n\t call loverlap,\n*/\n{\n int lap;\n int kstart;\n int k,kx,kx2,ky,ky2;\n int y1,y2;\n\n lap = -1;\n if (y3 < 0) y1 = 0; else y1 = y3;\n if (y4 < 0) y2 = 0; else y2 = y4;\n kstart = y1 - 2*STEP;\n if (kstart < 1) kstart = 1;\n for (k = yj[kstart]; ((lap < 0)&&(lbn[k].y < y2)); ++k)\n {\n kx = lbn[k].x;\n kx2 = lbn[k].x2;\n ky = lbn[k].y;\n ky2 = lbn[k].y2;\n if ((lbn[k].m == m )&&(lbn[k].i == i))\n {\n if ((loverlap(x1,x2,kx,kx2) > 0)\n && (loverlap(y1,y2,ky,ky2) > 0))\n {\n lap = k;\n }\n } /* m and i true */\n } /* k loop */\n return(lap);\n} /* lseeksym */\n/***************************************************/\n\nvoid lbows(void)\n/*\n detect and flag the various contact bows.\n\n\t called by linter,\n\t calls lseeksym,\n\nRelevant symbols:-\n m i\n Misc 1 bow\n Limb 4 lhand\n Limb 9 rhand\n Area 1 top/front \n Area 5 back/bottom\n Volm 1 relax\n Volm 2 bent\n Volm 3 straight\n Volm 4 stretch\n Volm 7 hold\n*/\n{\n int centre;\n int held,front,back;\n int mlhand,mrhand,wlhand,wrhand;\n\n centre = (staff[0][2] + staff[1][2])/2;\n for (j = 0; j < nlabs; ++j)\n {\n if ((lbn[j].m == Misc)&&(lbn[j].i == 1))\n {\n lassign();\n held = lseeksym(Volm,1,jx,jx2,jy-STEP,jy2);\n\t if (held > 0)\n {\n mlhand = lseeksym(Limb,4,jx-STEP/2,jx+STEP/2,jy,jy2+STEP);\n mrhand = lseeksym(Limb,9,jx-STEP/2,jx+STEP/2,jy,jy2+STEP);\n wlhand = lseeksym(Limb,4,jx2-STEP/2,jx2+STEP/2,jy,jy2+STEP);\n wrhand = lseeksym(Limb,9,jx2-STEP/2,jx2+STEP/2,jy,jy2+STEP);\n front = lseeksym(Area,1,jx-STEP/2,jx+STEP/2,jy,jy2+STEP);\n if (front < 0)\n front = lseeksym(Area,1,jx2-STEP/2,jx2+STEP/2,jy,jy2+STEP);\n if (front < 0)\n front = lseeksym(Area,2,jx-STEP/2,jx+STEP/2,jy,jy2+STEP);\n if (front < 0)\n front = lseeksym(Area,2,jx2-STEP/2,jx2+STEP/2,jy,jy2+STEP);\n back = lseeksym(Area,5,jx-STEP/2,jx+STEP/2,jy,jy2+STEP);\n if (back < 0)\n back = lseeksym(Area,5,jx2-STEP/2,jx2+STEP/2,jy,jy2+STEP);\n jb = 0;\n if (front > 0)\n {\n jb = FRONT;\n lbn[front].a = DONE;\n }\n else \n if (back > 0)\n {\n jb = BACK;\n lbn[back].a = DONE;\n }\n if (mlhand > 0) jb += MLHAND;\n if (mrhand > 0) jb += MRHAND;\n if (wlhand > 0) jb += WLHAND;\n if (wrhand > 0) jb += WRHAND;\n if (jb <= 0)\n fprintf(nudesfile,\"* OOPS: lbows: bow %d with no contacts\\n\",j);\n else\n {\n if (mlhand > 0) lbn[mlhand].b = jb;\n if (mrhand > 0) lbn[mrhand].b = jb;\n if (wlhand > 0) lbn[wlhand].b = jb;\n if (wrhand > 0) lbn[wrhand].b = jb;\n }\n } /* held */\n fprintf(nudesfile,\"* lbowsb %d %d %d %d %d %d %d %d %d\\n\",\n j+1,held,front,back,mlhand,mrhand,wlhand,wrhand,jb);\n } /* contact bow */\n } 
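/*\n the jb code built above is a decimal packing of the contact flags\n (FRONT 100, BACK 200, MLHAND 1, MRHAND 2, WLHAND 10, WRHAND 20);\n e.g. jb == 102 means a front/top sign plus the man's right hand,\n which lsethold later counts towards the semi-shadow hold\n*/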
/* j */\n} /* lbows */\n/****************************************/\n\nvoid lstart(void)\n/*\n seek pins denoting starting positions.\n\n called by linter,\n*/\n{\n int k;\n int p;\n int dx,dz;\n int mx,my;\n int wx,wy;\n\n mx = -123;\n my = -123;\n wx = -123;\n wy = -123;\n if (nm > 0)\n fprintf(nudesfile,\"\\n*\\ncall 0 1 doman\\n\");\n else\n fprintf(nudesfile,\"\\n*\\ncall 0 1 dowoman\\n\");\n fprintf(nudesfile,\"call 0 1 forleft\\n\");\n for (k = 0; k < npins; ++k)\n {\n if (pins[k][1] < 0)\n {\n p = pins[k][0];\n ji = lbn[p].i;\n if (lbn[p].d == LOW)\n {\n mx = lbn[p].x;\n my = lbn[p].y;\n if (nm > 0)\n fprintf(nudesfile,\n \"*\\nquadratic 0 1 spinby man mlfoot mpelvis %d y\\n\",\n (ji-1)*45);\n }\n else\n {\n wx = lbn[p].x;\n wy = lbn[p].y;\n if (nw > 0)\n fprintf(nudesfile,\n \"*\\nquadratic 0 1 spinby woman wrfoot wpelvis %d y\\n\",\n (ji-1)*45);\n }\n if ((wx > 0)&&(mx > 0)&&(wy > 0)&&(my > 0))\n {\n dx = ((wx - mx)*2)/3;\n dz = (wy - my)/2;\n if (nmw > 0)\n {\n fprintf(nudesfile,\"*\\n\");\n fprintf(nudesfile,\"repeat 0 1 centre mpelvis kx ky kz\\n\");\n fprintf(nudesfile,\"repeat 0 1 moveto woman wpelvis kx ky kz\\n\");\n fprintf(nudesfile,\"repeat 0 1 axes wpelvis cx cy cz\\n\");\n fprintf(nudesfile,\"linear 0 1 set dx %d\\n\",dx);\n fprintf(nudesfile,\"linear 0 1 set dz %d\\n\",dz);\n fprintf(nudesfile,\"linear 0 1 mult wx dx cx\\n\");\n fprintf(nudesfile,\"linear 0 1 mult wz dz cx\\n\");\n fprintf(nudesfile,\"repeat 0 1 centre wpelvis cx cy cz\\n\");\n fprintf(nudesfile,\"linear 0 1 moveby woman mpelvis wx 0 wz\\n\");\n fprintf(nudesfile,\"repeat 0 1 centre wpelvis cx cy cz\\n\");\n\t\t\t fprintf(nudesfile,\"repeat 0 1 set fpos 1\\n\");\n fprintf(nudesfile,\"repeat 0 1 call noposn\\n\");\n }\n }\n }\n }\n} /* lstart */\n/***********************************************/\n \n void lsetrange(void)\n/*\n set range of symbols to be interpreted\n\n called by linter,\n*/\n{\n int bend;\n int k,kmax;\n int ymax;\n\n ystart = 0;\n yend = lbn[0].y;\n ymax = yend;\n sstart = 0;\n ssend = nlabs;\n for (k = 0; k < nlabs; ++k)\n {\n if (lbn[k].m == Bars)\n {\n if (lbn[k].i == bstart)\n {\n sstart = k;\n ystart = lbn[k].y;\n }\n }\n }\n bend = bstart + blength;\n for (k = (sstart+1); k < nlabs; ++k)\n {\n if (lbn[k].m == Bars)\n {\n if (lbn[k].i == bend)\n ssend = k;\n }\n }\n for (k = 0; k < nlabs; ++k)\n {\n if (lbn[k].m == Dirn)\n {\n if (lbn[k].y > yend)\n yend = lbn[k].y;\n if ((lbn[k].y+lbn[k].h) > ymax)\n\t\t {\n ymax = lbn[k].y+lbn[k].h;\n\t\t\tkmax = k;\n\t\t }\n }\n }\n f_max = 2 + int(lbn_fpp*double(ymax));\n\tprintf(\"\\n lsetrange: pixels %d, frames %d\\n\",ymax,f_max);\n} /* lsetrange */\n/****************************************************/\n\nvoid lcopyfigs(void)\n/*\n finish off\n\n called by linter,\n calls lgetout,\n*/\n{\n sprintf(figsname,\"lintel.n\");\n if ((figsfile = fopen(figsname,\"r\")) == NULL)\n {\n if (figsfile) fclose(figsfile);\n printf(\"\\n\\noops %s not in folder\\n\",figsname);\n lgetout(1);\n if (ok == 1) goto rtrn;\n }\n while (fgets(buf,BMAX,figsfile) != NULL)\n fprintf(nudesfile,\"%s\",buf);\nrtrn: ;\n} /* lcopyfigs */\n/********************************************/\n\nvoid lfinish(void)\n/*\n finish off\n\n called by linter,\n calls lgetout,\n*/\n{\n f_max += 2;\n fprintf(nudesfile,\"*\\n\");\n fprintf(nudesfile,\"**************************\\n\");\n fprintf(nudesfile,\"*\\n\");\n if (nm > 0)\n fprintf(nudesfile,\n \"repeat 0 %3d ground man\\n\",f_max);\n else\n fprintf(nudesfile,\n \"repeat 0 1 moveto man mlfoot 10000 10000 10000\\n\");\n if (nw > 
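/*\n this epilogue parks any unused figure far away (moveto ... 10000),\n grounds and optionally tracks the figures in use, writes the\n setfmax subroutine, and finally computes frperbar = f_max/nbar as\n the average frames per bar (0 when no bars were seen)\n*/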
0)\n fprintf(nudesfile,\n \"repeat 0 %3d ground woman\\n\",f_max);\n else\n fprintf(nudesfile,\n \"repeat 0 1 moveto woman wlfoot 10000 10000 10000\\n\");\n if (nm > 0)\n fprintf(nudesfile,\n \"repeat 0 %3d centre mpelvis fx fy fz\\n\",f_max);\n else\n fprintf(nudesfile,\n \"repeat 0 %3d centre wpelvis fx fy fz\\n\",f_max);\n if (track == TRUE)\n {\n\t fprintf(nudesfile,\n \"repeat 0 %3d add fy -900 fz\\n\",f_max);\n fprintf(nudesfile,\n \"repeat 0 %3d place fx 500 fy\\n\",f_max);\n }\n fprintf(nudesfile,\n \"repeat 0 %3d observe -9 0 0\\n*\\n\",f_max);\n fprintf(nudesfile,\n \"end dance\\n****************************\\n\");\n fprintf(nudesfile,\n \"*\\nsubroutine setfmax\\n\");\n fprintf(nudesfile,\n \"*\\nrepeat 0 1 set fmax %d\\n\",f_max);\n fprintf(nudesfile,\n \"*\\nend setfmax\\n\");\n fprintf(nudesfile,\n \"****************************\\n*\\nstop\\n\");\n fclose(nudesfile);\n if (nbar > 0)\n frperbar = f_max/nbar;\n else\n frperbar = 0;\n} /* lfinish */\n/********************************************/\n\nvoid lselectfig(void)\n/*\n select figure\n\n called by linter,\n*/\n{ \n int k;\n int nf;\n int nogo;\n int st;\n int stv0,stv1,st4;\n int stv[2];\n char key;\n\nagain:\n for (k = 0; k < nstaff; ++k)\n staff[k][5] = DONE;\n nf = 0;\n nm = 0;\n nw = 0;\n nogo = FALSE;\n if (nstaff < 1)\n printf(\"no staves\\n\");\n else\n if (nstaff == 1)\n {\n staff[0][5] = TODO;\n if (staff[0][4] == MAN) \n ++nm;\n else\n ++nw;\n }\n else\n if (nstaff > 1)\n {\n nmw = 0;\n if (nstaff > TMAX)\n printf(\"This can only interpret staves from 1 to %d\\n\",\n\t\t TMAX);\n if (lbn_figures == 2)\n {\n stv[0] = 1; stv[1] = 2;\n track = TRUE;\n }\n else // (lbn_figures != 2)\n {\n printf(\"\\nPlease type the number of staves to be interpreted\\n\");\n if (gets(buf) == NULL)\n {\n printf(\"OOPS: cannot open standard input\\n\");\n lgetout(1);\n nogo = TRUE;\n if (ok == 1) goto rtrn;\n }\n sscanf(buf,\"%d\",&lbn_figures);\n if (lbn_figures > 2)\n {\n printf(\"sorry; this program can only interpret 2 staves at a time\\n\");\n nogo = TRUE;\n goto again;\n }\n if (lbn_figures == 1)\n printf(\"Please enter the staff number to be interpreted\\n\");\n else\n {\n printf(\"Please enter staff numbers to be interpreted\\n\");\n printf(\"separated by a space, and followed by the 'enter' key.\\n\\n\");\n }\n if (gets(buf) == NULL)\n {\n printf(\"OOPS: cannot read staff numbers\\n\");\n lgetout(1);\n nogo = TRUE;\n if (ok == 1) goto rtrn;\n }\n if (lbn_figures == 1)\n {\n sscanf(buf,\"%d\",&stv0); \n stv[0] = stv0; stv[1] = -1;\n }\n else\n {\n sscanf(buf,\"%d %d\",&stv0,&stv1); \n stv[0] = stv0; stv[1] = stv1;\n }\n } /* lbn_figures != 2 */\n for (nf = 0; nf < lbn_figures; ++nf)\n {\n st = stv[nf]-1;\n if ((st < 0)||(st > nstaff))\n {\n printf(\"OOPS: staff number %d out of range\\n\",st+1);\n goto again;\n }\n st4 = staff[st][4];\n if ( ((nm > 0)&&(st4 == MAN))\n ||((nw > 0)&&(st4 == WOMAN)) )\n {\n printf(\"Sorry: can only do one man and/or one woman.\");\n printf(\"Please select again.\\n\");\n nogo = TRUE;\n } /* more than 1 man or woman */\n else\n {\n if (st4 == WOMAN) ++nw;\n if (st4 == MAN) ++nm;\n staff[st][5] = TODO;\n } /* a man or woman */ \n nmw = nm*nw;\n } /* nf */\n } /* nstaff > 1 */\n if (nogo == TRUE)\n goto again;\n\nrtrn: \n if (lbn_figures != 2)\n {\n track = TRUE;\n printf(\"Track main figure? 
Hit 'enter' for Yes, any other key for No\\n\");\n key = getchar(); \n if (key != '\\n')\n track = FALSE;\n }\n else\n track = TRUE;\n if (track == FALSE)\n printf(\"\\n tracking OFF\\n\");\n else\n printf(\"\\n tracking ON\\n\");\n} /* lselectfig */\n/***********************************************/\n\nvoid ldobar(void)\n/*\n write bar number out\n\n called by laction,\n*/\n{\n if ((jm == Bars) && (jy < yend))\n {\n ++nbar;\n fprintf(nudesfile,\"*\\n\");\n fprintf(nudesfile,\"***************************\\n\");\n fprintf(nudesfile,\"*\\n\");\n fprintf(nudesfile,\"* bar %d\\n\",nbar);\n fprintf(nudesfile,\"*\\n\");\n }\n} /* ldobar */\n/********************************************/\n\nvoid lbent(void)\n/*\n for Volm symbol : flag next 'Dirn' symbol above\n \n called by laction,\n calls lassign,\n\n Volm 1 RELAX\n Volm 3 BENT\n Volm 2 STRAIGHT\n Volm 4 STRETCH\n Volm 7 hold\n*/\n{\n int g;\n int k;\n int ki,kx,kx2,ky,ky2;\n int jy2h;\n char km;\n\n for (j = 0; j < ssend; ++j)\n {\n if ((lbn[j].m == Volm)&&(lbn[j].i <= STRETCH)) \n {\n lassign();\n jy2h = jy2+jh;\n g = -1;\n for (k = j+1;\t((k < nlabs)&&(g < 0)); ++k)\n {\n km = lbn[k].m;\n if ((km == Dirn)&&(lbn[k].a == TODO))\n {\n ky = lbn[k].y;\n if (ky > jy2h)\n g = 0;\n else\n {\n ky2 = lbn[k].y2;\n kx = lbn[k].x;\n kx2 = lbn[k].x2;\n if ((loverlap(jx,jx2,kx,kx2) > 0)\n &&(loverlap(jy2,jy2h,ky,ky2) > 0))\n {\n g = k;\n lbn[j].b = ji;\n ki = lbn[k].i;\n lbn[j].m = km;\n lbn[j].i = ki;\n lbn[j].y2 = ky2;\n lbn[j].h = lbn[k].y2 - jy;\n lbn[j].d = lbn[k].d;\n lbn[k].a = DONE;\n if (ji == BENT)\n {\n if ((ki == 11)&&(jc < 0))\n lbn[j].i = 8;\n else\n if ((ki == 11)&&(jc > 0))\n lbn[j].i = 3;\n } /* ji == BENT */\n } /* overlapping */\n } /* ky < jy2h */\n } /* km = Dirn */\n } /* k */\n } /* jm = Volm */\n } /* j */\n} /* lbent */\n/********************************************/\n\nvoid lrelease(void)\n/*\n release the hold when jm = Misc\n\n called by laction,\n \t Assumes one of the following holds:\n\n\t So far:\n NO - no hold: arm gestures apply.\n CL - closed hold: normal ballroom dancing position.\n SS - semi-shadow hold: both facing same way, bodies touching, \n man's L hand to lady's L hand,\n man's R hand to front of lady's R hip,\n\t\t lady's R hand free.\n OE - open extended hold: both facing same way, bodies apart,\n man's R hand to lady's L hand, other hands free.\n CO - counter open extended hold: both facing same way, bodies apart,\n man's L hand to lady's R hand, other hands free.\n SH - shadow hold: both facing same way, bodies touching,\n L hand to L hand, R hand to R hand.\n\n later to do:\n PR - promenade position: facing partner, bodies touching,\n but both prepared to travel to man's L.\n CP - counter promenade position: facing partner, bodies touching,\n but both prepared to travel to man's R.\n DB - double hold: facing partner, bodies apart,\n L hand to R hand, R hand to L hand.\n OP - open hold: facing partner, bodies apart,\n man's L hand to lady's R hand, other hands free.\n CR - crossed open hold: facing partner, bodies apart,\n man's R hand to lady's R hand, other hands free.\n\n\tRelevant symbols:-\n m i\n Misc 1 bow\n Misc 2 release1\n Misc 3 release2\n Limb 4 lhand\n Limb 9 rhand\n Area 1 top/front \n Area 5 back/bottom\n Volm 1 RELAX\n Volm 3 BENT\n Volm 2 STRAIGHT\n Volm 4 STRETCH\n Volm 7 hold\n\n FRONT 100 // front symbol found\n BACK 200 // back symbol found\n MLHAND 1 // man's left hand symbol found\n MRHAND 2 // man's right hand symbol found\n WLHAND 10 // woman's left hand symbol found\n WRHAND 20 // 
woman's right hand symbol found\n*/\n{\n int fdif;\n int fbegin,ffin;\n\n if ((nmw > 0)&&(ji == 2)) // release\n {\n holdcl = 0;\n holdoe = 0;\n holdco = 0;\n holdpr = 0;\n holdsh = 0;\n holdss = 0;\n fbegin = pend;\n ffin = fend;\n if (ffin <= fbegin) ffin = fbegin + 1;\n fdif = ffin - fbegin;\n if ((st > 0) && (hold != NO))\n {\n fprintf(nudesfile,\n \"repeat %3d %3d set fpos %d\\n\",fbegin,ffin,fdif);\n fprintf(nudesfile,\n \"call %3d %3d noposn\\n*\\n\",fbegin,ffin);\n }\n hold = NO;\n fprintf(nudesfile,\n \"* lreleasea %d %d %d %d %d %d\\n\",\n fstart,fend,j,jb,hold,prevhold);\n keptf = ffin;\n }\n} /* lrelease */\n/******************************************/\n\nvoid ldoposn(void)\n/*\n set up a couple dance position\n\n called by lsethold, ldohold\n*/\n{\n\t fbegin = fstart;\n\t ffin = fend;\n\t \t fprintf(nudesfile,\n\t\t\"** ldoposn %3d %3d, %3d %3d\\n\",fbegin,ffin, st,hold);\n if (st > 0)\n {\n\t\t\tflen = ffin - fbegin;\n\t\t\tif (flen < 1) flen = 1;\n\t\t\tif (hold != NO) \n fprintf(nudesfile,\n \"repeat %3d %3d set fpos %3d\\n\",\n fbegin,ffin,flen);\n\t\t\tif (hold == PR)\n fprintf(nudesfile,\n \"call %3d %3d prposn\\n*\\n\",fbegin,ffin);\n\t\t\telse\n\t\t\tif (hold == CO)\n fprintf(nudesfile,\n \"call %3d %3d coposn\\n*\\n\",fbegin,ffin);\n\t\t\telse\n\t\t\tif (hold == CL)\n fprintf(nudesfile,\n \"call %3d %3d clposn\\n*\\n\",fbegin,ffin);\n\t\t\telse\n\t\t\tif (hold == SS)\n fprintf(nudesfile,\n \"call %3d %3d ssposn\\n*\\n\",fbegin,ffin);\n\t\t\telse\n\t\t\tif (hold == OE)\n fprintf(nudesfile,\n \"call %3d %3d oeposn\\n*\\n\",fbegin,ffin);\n\t\t\telse\n\t\t\tif (hold == SH)\n fprintf(nudesfile,\n \"call %3d %3d shposn\\n*\\n\",fbegin,ffin);\n\t\t\tkeptf = ffin;\n\t\t\tprevhold = hold;\n } /* st > 0 */\n} /* ldoposn */\n/*******************************************/\n\nvoid ldokeep(void)\n/*\n maintain a couple dancing position\n\n called by dohold,\n*/\n{\n\tfprintf(nudesfile,\n\t\t\"** ldokeep %3d %3d, %3d\\n\",fbegin,ffin,hold);\n if (hold == PR)\n fprintf(nudesfile,\n \"repeat %3d %3d call prkeep\\n*\\n\",fbegin,ffin);\n else\n if (hold == CL)\n\t\t fprintf(nudesfile,\n \"repeat %3d %3d call clkeep\\n*\\n\",fbegin,ffin);\n else\n if (hold == OE)\n fprintf(nudesfile,\n \"repeat %3d %3d call oekeep\\n*\\n\",fbegin,ffin);\n else\n if (hold == SS)\n fprintf(nudesfile,\n \"repeat %3d %3d call sskeep\\n*\\n\",fbegin,ffin);\n\t\t else\n if (hold == CO)\n fprintf(nudesfile,\n \"repeat %3d %3d call cokeep\\n*\\n\",fbegin,ffin);\n else\n if (hold == SH)\n fprintf(nudesfile,\n \"repeat %3d %3d call shkeep\\n*\\n\",fbegin,ffin);\n keptf = ffin;\n} /* ldokeep */\n/******************************************/\n\nvoid ldohold(void)\n/*\n set up and maintain holds\n\t\n\tcalled by laction,\n\tcalls ldokeep, ldoposn,\n*/\n{\n fbegin = keptf;\n ffin = pend;\n\t\tfprintf(nudesfile,\n\t\t\"** ldohold %3d %3d, %3d %3d\\n\",fbegin,ffin, hold,prevhold);\n if (prevhold == hold) \n {\n\t fbegin = keptf;\n if (fbegin < ffin) ldokeep();\n } /* prevhold == hold */\n else\n {\n ldoposn();\n } /* prevhold != hold */\n} /* ldohold */\n/*************************************************/\n\nvoid lsethold(void)\n/*\n set the hold if jm = Limb or jm = Face\n\n called by laction,\n calls ldoposn,\n\n Uses the hand signs to determine the holds if any.\n \t Assumes one of the following holds:\n\n\t So far:\n NO - no hold: arm gestures apply.\n CL - closed hold: normal ballroom dancing position.\n SS - semi-shadow hold: both facing same way, bodies touching, \n man's L hand to lady's L hand,\n man's R hand to 
front of lady's R hip,\n ady's R hand free.\n OE - open extended hold: both facing same way, bodies apart,\n man's R hand to lady's L hand, other hands free.\n CO - counter open extended hold: both facing same way, bodies apart,\n man's L hand to lady's R hand, other hands free.\n SH - shadow hold: both facing same way, bodies touching,\n L hand to L hand, R hand to R hand.\n PR - promenade position: diagonally facing partner,\n bodies touching, both travelling to man's L.\n CP - counter promenade position: facing partner, bodies touching,\n but both prepared to travel to man's R.\n DB - double hold: facing partner, bodies apart,\n L hand to R hand, R hand to L hand.\n OP - open hold: facing partner, bodies apart,\n man's L hand to lady's R hand, other hands free.\n CR - crossed open hold: facing partner, bodies apart,\n man's R hand to lady's R hand, other hands free.\n\n#define NO 0 // no hold\n#define CL 1 // closed hold\n#define PR 2 // promenade position\n#define CP 3 // counter promenade position\n#define DB 4 // double hold\n#define OP 5 // open hold\n#define CR 6 // crossed open hold\n#define OE 7 // open extended hold\n#define CO 8 // counter open extended hold\n#define SH 9 // shadow hold\n#define SS 10 // semi-shadow hold \n\n\tRelevant symbols:-\n m i\n Misc 1 bow\n Misc 2 release1\n Misc 3 release2\n Limb 4 lhand\n Limb 9 rhand\n Area 1 top/front \n Area 5 back/bottom\n Volm 1 RELAX\n Volm 3 BENT\n Volm 2 STRAIGHT\n Volm 4 STRETCH\n Volm 7 hold\n\n#define FRONT 100 // front/top symbol found\n#define BACK 200 // back symbol found\n#define MLHAND 1 // man's left hand symbol found\n#define MRHAND 2 // man's right hand symbol found\n#define WLHAND 10 // woman's left hand symbol found\n#define WRHAND 20 // woman's right hand symbol found\n*/\n{\n int i,n;\n int dy,ylap;\n\n prevhold = hold;\n mface = -1;\n wface = -1;\n facedif = -1;\n\tif ((jm == Face)&&(oriented == FALSE)&&\n\t\t(((dofig == MAN)&&(jc < 0))||(dofig == WOMAN)&&(jc > 0)))\n\t{\n\t\tfprintf(nudesfile,\n\t\t\t\"linear %3d %3d spinby fig afoot pelvis %d y\\n\",\n 0,1,(ji-1)*45);\n\t\toriented = TRUE;\n\t}\n if ((jm == Limb)&&((ji == 4)||(ji == 9)))\n {\n if (jb == 11) { ++holdss; ++holdsh; }\n if (jb == 12) ++holdoe;\n if (jb == 21) { ++holdco; ++holdcl; ++holdpr; }\n if (jb == 22) ++holdsh;\n if (jb == 110) { ++holdcl; ++holdpr; }\n if (jb == 102) ++holdss;\n if (jb == 120) ++holdss;\n if (jb == 202) { ++holdcl; ++holdpr; }\n } /* jm = a hand */\n else\n if ((jm == Face)&&(jx > stmiddle))\n {\n n = -1;\n ylap = -1;\n wface = ji;\n for (i = 1; i < 9; ++i)\n {\n n = lseeksym(Face,i,xmin,stmiddle,jy,jy2);\n if (n >= 0)\n {\n dy = loverlap(jy,jy2,lbn[n].y,lbn[n].y2);\n if (dy > ylap)\n {\n ylap = dy;\n mface = i;\n }\n } /* found man facing sign */\n }\n if (mface >= 0)\n {\n facedif = mface - wface;\n if (facedif < 0) facedif += 8;\n if (facedif > 7) facedif -= 8;\n }\n else\n facedif = -1;\n if (facedif == 0)\n {\n facecl = 0;\n facepr = 0;\n facesh = 1;\n facess = 1;\n } /* facing same way */\n else\n if (facedif == 2)\n {\n facecl = 0;\n facepr = 1;\n facesh = 0;\n facess = 0;\n } /* facing at right angles */\n else\n if (facedif == 4)\n {\n facecl = 1;\n facepr = 0;\n facesh = 0;\n facess = 0;\n } /* facing opposite ways */\n } /* jm == Face */\n if (holdoe > 1) if (hold != CO) hold = OE;\n if (holdco > 1) if (hold != OE) hold = CO;\n if ((facesh+holdsh) > 4) hold = SH;\n if ((facess+holdss) > 4) hold = SS;\n if ((facepr+holdpr) > 4) hold = PR;\n if ((facecl+holdcl) > 4) hold = CL;\n\tfprintf(nudesfile,\n\t\t\"** 
lsethold %d %d, %d %d, %d %d, %d %d, %d %d, %3d %3d %3d\\n\",\n\t\thold, prevhold,\n\t\tfacesh,holdsh,facess,holdss,facepr,holdpr,facecl,holdcl,\n\t\tmface,wface,facedif);\n if (prevhold != hold) ldoposn();\n} /* lsethold */\n/********************************************/\n\nvoid ldochest(int piv)\n/*\n rotate the chest and stomach\n \n called by ldolimb,\n*/\n{\n if (piv == 0)\n {\n\t fprintf(nudesfile,\n \"quadratic %3d %3d bendto chest ribs stomach 0 0 0\\n\",\n fstart,fend);\n\t fprintf(nudesfile,\n \"quadratic %3d %3d bendto stomach waist pelvis 0 0 0\\n\",\n fstart,fend);\n } /* piv == 0 */\n else\n {\n if (dofig == MAN)\n fprintf(nudesfile,\n \"quadratic %3d %3d rotate chest ribs %3d\\n\",\n fstart,fend,-piv/2);\n else\n\t fprintf(nudesfile,\n \"quadratic %3d %3d rotate chest ribs %3d\\n\",\n fstart,fend,piv/2);\n fprintf(nudesfile,\n \"quadratic %3d %3d rotate stomach waist %3d\\n\",\n fstart,fend,piv/2);\n } /* piv != 0 */\n} /* ldochest */\n/******************************************/\n\nvoid ldolimb(void)\n/*\n do something to some body part\n \n called by laction,\n calls ldoarms, ldochest,\n\n\tVolm 7 + Area 9 = chest\n*/\n{\n\tint nc;\n\tint piv;\n\n\tnc = jc+8;\n\tpiv = -1;\n\tif ( (colm[nc] == ARM)&&(jm == Dirn)&&\n\t\t((hold == NO)||(hold == OE)||(hold == CO)) )\n ldoarms();\n\telse\n\tif (jm == Limb)\n\t\tcolm[nc] = Limb;\n\telse\n\tif ((jm == Volm)&&(ji == 7)\n\t\t&&(colm[nc] == Area)&&(jd == BLANK))\n\t{\n\t\tcolm[nc] = CHEST;\n\t fprintf(nudesfile,\"* ldolimba CHEST at column %d\\n\",nc);\n\t}\n\telse\n\tif ((jm == Area)&&(ji == 9)\n\t\t&&(colm[nc] == Volm)&&(jd == BLANK))\n\t\tcolm[nc] = CHEST;\n\telse\n\tif ((jm == Area)&&(ji == 9))\n\t\tcolm[nc] = Area;\n\telse\n\tif ((jm == Volm)&&(ji == 7))\n\t\tcolm[nc] = Volm;\n\telse\n\tif ((jm == Rotn)&&(colm[nc] == CHEST))\n\t{\n\t\tpiv = lgetpin();\n\t\tldochest(piv);\n\t}\n} /* ldolimb */\n/*********************************************/\n\nvoid lcoords(char jm, int ji)\n/*\n\tcheck for change of coordinates\n\n\tcalled by laction,\n\tcalls lseeksym, lgetpin\n\n\tRelevant symbols:-\n\tm i\n\tVolm 5 space hold\n\tVolm 6 coordinates\n\tVolm 7 body hold\n\tArea 9 square\n\tPins 1 forward\n\tPins 5 backward\n\t\n\t 1 Aug 2006 checking piv against maxint\n\t30 Jul 2006 writing bendtos for mspace and wspace\n*/\n{\n\tint k;\n\tint piv;\n\n\tif ((jm == Area)&&(ji == 9))\n\t{\n\t\tpiv = lgetpin ( );\n\t\t//fprintf(nudesfile,\"* lcoordsa %c %d\\n\",m,piv);\n\t\tif (piv != maxint)\n\t\t{\n\t\t\tif (piv == 360) piv = 0;\n\t\t\t//coordinates = SPACE;\n\t\t\tif ( dofig == MAN )\n\t\t\t{\n\t\t\t\tfprintf(nudesfile,\n\t\t\t\t\t\"repeat %d %d bendto mspace jman joist 270 0 %d\\n\",\n\t\t\t\t\tfstart, fend, piv);\n\t\t\t mspace = true;\n\t\t\t}\n\t\t\telse\n\t\t\t{\n\t\t\t\tfprintf(nudesfile,\n\t\t\t\t\t\"repeat %d %d bendto wspace jwoman joist 270 0 %d\\n\",\n\t\t\t\t\tfstart, fend, piv);\n\t\t\t\twspace = TRUE;\n\t\t\t}\n\t\t} /* space stance found */\n\t} /* possible space stance found */\n\telse\n\t{\n\t\tk = lseeksym(Volm,7,jx,jx2,jy,jy2);\n\t\tif (k > 0)\n\t\t{\n\t\t\t//coordinates = BODY;\n\t\t\tif ( dofig == MAN )\n\t\t\t mspace = false;\n\t\t\telse\n\t\t\t\twspace = FALSE;\n\t\t} /* body stance found */\n\t\t//fprintf(nudesfile,\"* lcoordsb mspace wspace TRUE\\n\",\n\t\t\t//mspace,wspace,TRUE);\n\t} /* possible body stance found */\n} /* lcoords */\n/*****************************************/\n\nvoid ldotoetaps ( void )/*\n/*\n\n\tdo toe taps with gestures of the legs\n\tdoing diagonals sideways at present\n\n\tVolm 1 RELAX\n\tVolm 
3 BENT\n\tVolm 2 STRAIGHT\n\tVolm 4 STRETCH\n\tVolm 7 hold\n\n\tcalled by laction,\n\tcalls lgetout, lsetframes, bell,\n\n\t19 Aug 2006 d076- Don Herbison-Evans\n*/\n{\n\tif ( (( jc == -3 )||( jc == 3 )) && ( jd == -1 ) )\n\t{\n\t\t\tfprintf ( nudesfile, \"*\\n\" );\n\t\t\tif ( ji==11 )\n\t\t\t\tfprintf ( nudesfile, \"* in place tap\\n\" );\n\t\t\telse if ( ( ji == 1 ) || ( ji == 10 ) )\n\t\t\t\tfprintf ( nudesfile, \"* forward tap\\n\" );\n\t\t\telse if ( ( ji == 2 ) || ( ji == 9 ) )\n\t\t\t\tfprintf ( nudesfile, \"* forward diagonal tap\\n\" );\n\t\t\telse if ( ( ji == 3 ) || ( ji == 8 ) )\n\t\t\t\tfprintf ( nudesfile, \"* sideways tap\\n\" );\n\t\t\telse if ( ( ji == 4 ) || ( ji == 7 ) )\n\t\t\t\tfprintf ( nudesfile, \"* back diagonal tap\\n\" );\n\t\t\telse if ( ( ji == 5 ) || ( ji == 6 ) )\n\t\t\t\tfprintf( nudesfile, \"* backward tap\\n\" );\n\t\t\t//\n\t\t\tif ( dofig == MAN )\n\t\t\t{\n\t\t\t\tif (mspace == false)\n\t\t\t\t\tfprintf( nudesfile, \"repeat %3d %3d set coords mpelvis\\n\",\n\t\t\t\t\tfstart, fend );\n\t\t\t\telse\n\t\t\t\t\tfprintf( nudesfile, \"repeat %3d %3d set coords mspace\\n\",\n\t\t\t\t\tfstart, fend );\n\t\t\t}\n\t\t\telse\n\t\t\t{\n\t\t\t\tif (wspace == FALSE)\n\t\t\t\t\tfprintf( nudesfile, \"repeat %3d %3d set coords wpelvis\\n\",\n\t\t\t\t\tfstart, fend );\n\t\t\t\telse\n\t\t\t\t\tfprintf( nudesfile, \"repeat %3d %3d set coords wspace\\n\",\n\t\t\t\t\tfstart, fend );\n\t\t\t}\n\t\t\t//\n\t\t\tif ( jc < 0 )\n\t\t\t{\n\t\t\t\tif ( ( ji <= 1 ) || ( ji == 3 ) || ( ji == 5 ) || ( ji > 11 ) )\n\t\t\t\t{\n\t\t\t\t\tprintf ( \"OOPS: ldotoetap left direction problem line %d\\n\", j );\n\t\t\t\t\tprintf ( \"%3d %3d %3d %3d %3d %3d %3d %3d %d\\n\", jm, ji, jx, jy, js, jw, jh, jb, jd );\n\t\t\t\t\tlgetout ( 1 );\n\t\t\t\t\tif ( ok == 1 ) return;\n\t\t\t\t} /* i wrong */\n\t\t\t\tfprintf ( nudesfile,\n\t\t\t\t\t\"repeat %3d %3d call forleft * left = b\\n\", fstart, fend );\n\t\t\t} /* left side */\n\t\t\telse if ( jc > 0 )\n\t\t\t{\n\t\t\t\tif ( ( ji < 1 ) || ( ji == 6 ) || \n\t\t\t\t\t( ji == 8 ) || ( ji == 10 ) || ( ji > 11 ) )\n\t\t\t\t{\n\t\t\t\t\tprintf ( \"OOPS: ldotoetap right direction problem line %d\\n\", j );\n\t\t\t\t\tprintf ( \"%3d %3d %3d %3d %3d %3d %3d %3d %d\\n\", jm, ji, jx, jy, js, jw, jh, jb, jd );\n\t\t\t\t\tlgetout ( 1 );\n\t\t\t\t\tif ( ok == 1 ) return;\n\t\t\t\t} /* i wrong */\n\t\t\t\tfprintf ( nudesfile,\n\t\t\t\t\t\"repeat %3d %3d call forright * right = b\\n\",fstart, fend );\n\t\t\t} /* right side */\n//\n\t\t\tif ( ji == 11 )\n\t\t\t{\n\t\t\t\tfprintf ( nudesfile, \"repeat %3d %3d call %s\\n\",\n\t\t\t\t\tfstart, fend, risesub[rise] );\n\t\t\t\tfprintf ( nudesfile,\n\t\t\t\t\t\"repeat %3d %3d set fend %d\\n\",\n\t\t\t\t\tfstart, fend, frange );\n\t\t\t\tfprintf ( nudesfile,\n\t\t\t\t\t\"linear %3d %3d bendto bleg bknee bthigh lrlx1 lrlx2 lrlx3\\n\",\n\t\t\t\t\tfstart, fend );\n\t\t\t} /* close without weight */\n\t\t\telse\n\t\t\t\tfprintf ( nudesfile,\n\t\t\t\t\t\"linear %3d %3d bendto bleg bknee bthigh lhig1 lhig2 lhig3\\n\",\n\t\t\t\t\tfstart, fend );\n\t\t\tfprintf ( nudesfile,\n\t\t\t\t\"linear %3d %3d drag bfoot bfoot bankle bleg x\\n\",\n\t\t\t\tfstart, fend );\n\t\t\tlbn[j].a = DONE;\n\t} /* c OK */\n} /* ldotoetaps */\n/**************************************/\n\nvoid laction(void)\n/*\n run through and interpret the actions\n\n called by linter,\n calls ldobar, ldosteps, lleggesture, ldolimb,\n ldopivot, lbent, lassign, lsetframes,\n lsethold, ldohold, lrelease, lface,\n\n#define FRONT 100 // front symbol found\n#define BACK 200 
// back symbol found\n#define MLHAND 1 // man's left hand symbol found\n#define MRHAND 2 // man's right hand symbol found\n#define WLHAND 10 // woman's left hand symbol found\n#define WRHAND 20 // woman's right hand symbol found \n\t\n#define NO 0 // no hold\n#define CL 1 // closed hold\n#define PR 2 // promenade position\n#define CP 3 // counter promenade position\n#define DB 4 // double hold\n#define OP 5 // open hold\n#define CR 6 // crossed open hold\n#define OE 7 // open extended hold\n#define CO 8 // counter open extended hold\n#define SH 9 // shadow hold\n#define SS 10 // semi-shadow hold \n\nRelevant symbols:-\n m i\n Misc 1 bow\n Misc 2 release1\n Misc 3 release2\n Limb 4 lhand\n Limb 9 rhand\n Area 1 top/front \n Area 5 back/bottom\n\t Area 9 square\n Volm 1 RELAX\n Volm 3 BENT\n Volm 2 STRAIGHT\n Volm 4 STRETCH\n\t Volm 6 coordinates\n Volm 7 hold\n Face n facing direction\n\n*/\n{\n\tfprintf ( nudesfile, \"*\\n************************************\\n\" );\n\toriented = FALSE;\n\tif ( dofig == MAN )\n\t\tfprintf ( nudesfile, \"*\\nrepeat 0 %3d call doman\\n\", f_max );\n\telse\n\t\tfprintf ( nudesfile, \"*\\nrepeat 0 %3d call dowoman\\n\", f_max );\n\tfor ( j = 0; j < NCOLM; ++j )\n\t\tcolm[j] = ARM;\n\tfor ( j = 0; j < ssend; ++j )\n\t{\n\t\tlassign ();\n\t\tlsetframes ();\n\t\tfprintf(nudesfile,\"* %d %3d %s\",lbn[j].a,jc,lbnline[j]);\n\t\tif ( lbn[j].a == TODO )\n\t\t{\n\t\t\tif ( jm == Bars )\n\t\t\t\tldobar ();\n\t\t\telse if ( ( jm == Face ) || ( jm == Limb ) )\n\t\t\t\tlsethold ();\n\t\t\telse if ( jm == Misc )\n\t\t\t{\n\t\t\t\tlrelease ();\n\t\t\t}\n\t\t\telse if ( ( jc > -8 ) && ( jc < 8 ) )\n\t\t\t{\n\t\t\t\tif ( (( jm == Volm )&&( ji == 6 )) \n\t\t\t\t\t||(( jm == Area )&&( ji == 9 )) )\n\t\t\t\t\t\tlcoords(jm, ji);\n\t\t\t\tif ( ( jm == Rotn ) && ( jc > -4 ) && ( jc < 4 ) )\n\t\t\t\t\tldopivot ();\n\t\t\t\telse if (( jm == Dirn ) && ( jc > -4 ) && ( jc < 4 ))\n\t\t\t\t{\n\t\t\t\t\tldostep ();\n\t\t\t\t\tlleggesture ();\n\t\t\t\t\tldotoetaps ();\n\t\t\t\t}\n\t\t\t\telse\n\t\t\t\t\tldolimb ();\n\t\t\t} /* jc OK */\n\t\t} /* ja == TODO */\n\t\tif (( (jm == Dirn)||(jm == Rotn) )&&(jc >= -6)&&(jc <= 6)\n\t\t\t&&( nmw > 0 )&&( dofig == WOMAN ) )\n\t\t\tldohold ();\n\t\tpstart = fstart;\n\t\tpend = fend;\n\t} /* j */\n} /* laction */\n/*************************************************/\n\nvoid linter(void)\n/*\n linter\n\n interpret labanotation score into a NUDES file\n version linter50.c\n\n input : LED Labanotation file: standard input (led.lbn)\n output: NUDES animation script: standard output (led.n)\n\n called by main,\n calls lbnread, lsorty, lfindstaff, lstart, lhold,\n lfindystart, lcolx, lsetrange, lselectfig,\n lgetout, lcopyfigs, lfinish, lcopysubs,\n lbows,\n*/\n{\n lbnread();;\n lsorty();\n lfindstaff();\n lsetrange();\n lselectfig();\n lcopyfigs();\n lstart();\n lfindystart();\n lbows(); // flag hand signs\n lbent(); // flag dirn signs\n for (st = 0; st < nstaff; ++st)\n {\n hold = NO;\n holdcl = 0;\n holdco = 0;\n holdoe = 0;\n holdpr = 0;\n holdsh = 0;\n\n holdss = 0;\n facecl = 0;\n facepr = 0;\n facesh = 0;\n facess = 0;\n prevhold = -9;\n prevc = 0;\n pstart = -1;\n pend = -1;\n keptf = 0;\n gy = -1;\n gh = 0;\n if (staff[st][5] == TODO)\n {\n nbar = -1;\n if (staff[st][4] == MAN)\n dofig = MAN;\n else\n dofig = WOMAN;\n lcolx(staff[st][2]);\n laction();\n staff[st][5] = DONE;\n }\n }\n lfinish();\n} /* linter */\n/****************************************/\n\n\nvoid getout(int v)\n/*\n exit gracefully\n\n called by main, inlims, openfile, compl, nexts, 
doperfrm,\n	 initsphere, getkeys,\n*/\n{\n    if (v != 0) \n    {\n	   printf(\"lintel problem\\nok error number %d\\n\",ok);\n	   printf(\"line %d, action %d\\n%s\\n\",\n		   pp,p,aline[pp]);\n    }\n    if (infile) fclose(infile);\n    ok = 1;\n} /* getout */\n/********************************************/\n\nvoid openfile(void)\n/*\n   open the nudes file written by the linter routine\n\n   called by main,\n*/\n{\n    if ((infile = fopen(nudesname,\"r\")) == NULL)\n    {\n        if (infile) fclose(infile);\n        printf(\"\\n\\n %s oops?\\n\",nudesname);\n		  ok = 2;\n        getout(1);\n        if (ok == 1) goto rtrn;\n    }\n    printf(\"\\n opened %s\\n\",nudesname);\nrtrn: ;\n} /* openfile */\n/***************************************/\n\n\n\nvoid add_id_num ( char name[], char outname[], char ext[] )\n{\n	FILE *test_File;\n	int j;\n\n	for ( j = 0; j <= 999; j++ )\n	{\n		sprintf ( outname, \"%s_%03d%s\", name, j, ext );\n\n		if ( ( test_File = fopen ( outname, \"r\" ) ) != NULL )\n		{\n			fclose ( test_File );\n		}\n		else\n		{\n			return;\n		}\n	}\n	sprintf ( outname, \"%s_000%s\", name, ext );\n} /* add_id_num */\n/*******************************************/\n\nint find_ini_title ( char title[] )\n/*\n   called by get_ini_str, get_ini_bool, \n	  get_ini_char,\n*/\n{\n	int value = -1;\n	int ini_no;\n	int j;\n	int plen;\n	int iplen;\n	if ( number_ini <= 0 ) return( value );\n	for ( ini_no = 0; ini_no < number_ini; ini_no++ )\n	{\n		plen = (int)strlen ( title );\n		iplen = 0;\n		for ( j = 0; j < plen; j++ )\n		{\n			if ( title[j] == ini_title[ini_no][j] )\n			{\n				iplen = iplen + 1;\n			}\n		}\n		if ( iplen == plen )\n		{\n			return( ini_no );\n		}\n	}\n	return( value );\n} /* find_ini_title */\n/************************************************/\n\nvoid get_ini_dump ( void )\n{\n	int ini_no;\n	printf ( \" number ini %d\\n\", number_ini );\n	if ( number_ini <= 0 ) return;\n	for ( ini_no = 0; ini_no < number_ini; ini_no++ )\n	{\n		printf ( \" ini_no %2d title %s value %s\\n\",\n			ini_no, &ini_title[ini_no][0], &ini_value[ini_no][0] );\n	}\n} /* get_ini_dump */\n/************************************************/\n\nbool get_if_ini ( void )\n{\n	if ( number_ini > 0 ) return( true );\n	return( false );\n} /* get_if_ini */\n/************************************************/\n\nbool get_ini_bool ( char title[] )\n{\n	int ini_no;\n	if ( number_ini <= 0 ) return( false );\n	ini_no = find_ini_title ( title );\n	if ( ini_no < 0 ) return( false );\n	if ( toupper( ini_value[ini_no][0] ) == 'T' )\n		return( true );\n	if ( toupper( ini_value[ini_no][0] ) == 'F' )\n		return( false );\n	return( false );\n\n} /* get_ini_bool */\n/************************************************/\n\nchar* get_ini_char ( char title[] )\n/*\n   calls find_ini_title, ini_value\n*/\n{\n	int ini_no;\n	if ( number_ini <= 0 ) return( NULL );\n	ini_no = find_ini_title ( title );\n	if ( ini_no < 0 ) return( NULL );\n	return( &ini_value[ini_no][0] );\n\n} /* get_ini_char */\n/************************************************/\n\nint get_ini_int ( char title[] )\n/*\n   calls find_ini_title, ini_value\n*/\n{\n	int value = 0;\n	int ini_no;\n	if ( number_ini <= 0 ) return( value );\n	ini_no = find_ini_title ( title );\n	if ( ini_no < 0 ) return( value );\n	value = atoi ( &ini_value[ini_no][0] );\n	//printf ( \" ini_no %d value %d\\n\", ini_no, value );\n	return( value );\n\n} /* get_ini_int
*/\n/************************************************/\n\nfloat get_ini_float ( char title[] )\n/*\n calls find_ini_title, ini_value\n*/\n{\n\tfloat value = 0.0f;\n\tint ini_no;\n\tif ( number_ini <= 0 ) return( NULL );\n\tini_no = find_ini_title ( title );\n\tif ( ini_no < 0 ) return( NULL );\n\tvalue = atof ( &ini_value[ini_no][0] );\n\treturn( value );\n} /* get_ini_float */\n/************************************************/\n\ndouble get_ini_double ( char title[] )\n/*\n calls find_ini_title, ini_value\n*/\n{\n\tdouble value = 0.0;\n\tint ini_no;\n\tif ( number_ini <= 0 ) return( NULL );\n\tini_no = find_ini_title ( title );\n\tif ( ini_no < 0 ) return( NULL );\n\tvalue = strtod ( &ini_value[ini_no][0], NULL );\n\treturn( value );\n} /* get_ini_double */\n/************************************************/\n\nbool get_ini_str ( char title[], char value[] )\n/*\n calls find_ini_title, ini_value\n*/\n{\n\t//char* value;\n\tint ini_no;\n\tint i;\n\tint len;\n\n\tvalue[0] = NULL;\n\tif ( number_ini <= 0 ) return( false );\n\tini_no = find_ini_title ( title );\n\tif ( ini_no < 0 ) return( false );\n\tlen = (int)strlen( &ini_value[ini_no][0] );\n\tif ( len <= 0 ) return( false );\n\ti = -1;\n\tdo\n\t{\n\t\ti = i + 1;\n\t\tvalue[i] = ini_value[ini_no][i];\t\t\n\t} while ( ini_value[ini_no][i] != NULL );\n\n\treturn( true );\n\n} /* get_ini_str */\n/************************************************/\n\nvoid get_ini ( int dump )\n/*\n open and decode .ini file\n\n called by main,\n*/\n{\n\tFILE *ini_file_unit;\n\tint ini_no;\n\tint j;\n\tint k;\n\tint len;\n\tint loc_comma;\n\tint loc_semi;\n\tchar asterisk = '*';\n\tchar blank = ' ';\n\tchar comma = ',';\n\tchar semi = ';';\n\tbool getout;\n\n\tfor ( ini_no = 0; ini_no < max_ini; ini_no++ )\n\t{\n\t\tini_title[ini_no][0] = NULL;\n\t\tini_value[ini_no][0] = NULL;\n\t}\n\n\tnumber_ini = -1;\n\tini_diag = 0;\n\tini_file_unit = NULL;\n\n\t// open ini file - check if it exists\n\n\tif ( ( ini_file_unit = fopen ( \"lintel.ini\", \"r\" ) ) == NULL )\n\t{\n\t\tif ( ini_file_unit ) fclose ( ini_file_unit );\n\t\tprintf ( \"\\n\\n %s\\n\\n\",\n\t\t\t\"lintel.ini not available - will continue\" );\n\t}\n\telse\n\t{\n\t\tini_no = 0;\n\t\tnumber_ini = ini_no;\n\t\tlen = -1;\n\t\tdo\n\t\t{\n\t\t\tini_title[ini_no][0] = NULL;\n\t\t\tini_value[ini_no][0] = NULL;\n\n\t\t\tif ( fgets ( buf, BMAX, ini_file_unit ) != NULL )\n\t\t\t{\n\t\t\t\tif ( ini_diag >= 1 )\n\t\t\t\t\tprintf ( \" ini_no %2d buf %s\", ini_no, buf );\n\t\t\t\tif ( buf[0] != asterisk )\n\t\t\t\t{\n\t\t\t\t\tif ( ini_diag >= 1 )\n\t\t\t\t\t\tprintf ( \" ini_no %2d buf %s\", ini_no, buf );\n\t\t\t\t\tloc_comma = -1;\n\t\t\t\t\tloc_semi = -1;\n\t\t\t\t\tgetout = false;\n\t\t\t\t\tlen = (int)strlen( buf );\n\t\t\t\t\tif ( ini_diag >= 1 ) printf ( \" len %d\\n\", len );\n\t\t\t\t\tfor ( j = 0; j < len; j++ )\n\t\t\t\t\t{\n\t\t\t\t\t\tif ( buf[j] == comma && loc_semi == -1 ) loc_comma = j;\n\t\t\t\t\t\tif ( buf[j] == semi )\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tloc_semi = j;\n\t\t\t\t\t\t\tgetout = true;\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif ( getout == true ) break;\n\t\t\t\t\t}\n\t\t\t\t\tif ( ini_diag >= 1 )\n\t\t\t\t\t\tprintf ( \" loc_comma %d loc_semi %d\\n\",\n\t\t\t\t\t\t\tloc_comma, loc_semi );\n\n\t\t\t\t\t// get parameter title\n\n\t\t\t\t\tk = 0;\n\t\t\t\t\tfor ( j = 0; j < loc_comma; j++ )\n\t\t\t\t\t{\n\t\t\t\t\t\tif ( buf[j] != blank )\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tini_title[ini_no][k] = buf[j];\n\t\t\t\t\t\t\tk = k + 1;\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\t\t\t\t\tini_title[ini_no][k] = 
NULL;\n\n\t\t\t\t\t// get parameter value\n\n\t\t\t\t\tk = 0;\n\t\t\t\t\tfor ( j = loc_comma + 1; j < loc_semi; j++ )\n\t\t\t\t\t{\n\t\t\t\t\t\tif ( buf[j] != blank )\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tini_value[ini_no][k] = buf[j];\n\t\t\t\t\t\t\tif ( ini_diag > 1 )\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tprintf( \" j %d k %d buf[j] %c ini %c\\n\",\n\t\t\t\t\t\t\t\t\tj, k, buf[j],ini_value[ini_no][k] );\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tk = k + 1;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tini_value[ini_no][k] = NULL;\n\t\t\t\t\tini_no = ini_no + 1;\n\t\t\t\t}\n\t\t\t\telse\n\t\t\t\t{\n\t\t\t\t\tif ( buf[1] == 'd' && buf[2] == 'u' \n\t\t\t\t\t\t&& buf[3] == 'm' && buf[4] == 'p' )\n\t\t\t\t\t\tdump = 1;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\twhile ( !feof( ini_file_unit ) && len != 0 );\n\t\tnumber_ini = ini_no;\n\t}\n\tif ( dump == 1 ) get_ini_dump ();\n} /* get_ini */\n/************************************************/\n\nbool strcmpend ( char str1[], char str2[] )\n/*\n\tcompare strings to see if str2 is included at end of str1\n*/\n{\n\tint len1, len2;\n\tint i1, i2;\n\tint cnt;\n\n\tlen1 = (int)strlen( str1 );\n\tlen2 = (int)strlen( str2 );\n\n\tcnt = 0;\n\ti2 = len2 - 1;\n\tfor ( i1 = len1 - 1; i1 >= len1 - len2; i1-- )\n\t{\n\t\tif ( str1[i1] == str2[i2] ) cnt = cnt + 1;\n\t\ti2 = i2 - 1;\n\t}\n\tif ( cnt == len2 )\t\treturn( true );\n\n\treturn( false );\n}/* strcmpend */void get_filesa ( bool lbn_type, int error )\n{\n\tprintf( \"\\n\" );\n\tif ( error == 0 )\n\t{\n\t\tprintf( \" Please type input filename followed by pressing the 'enter' key\\n\\n\" );\n\t}\n\telse\n\t{\n\t\tprintf( \"\\n\" );\n\t\tprintf( \" Please type a correct input filename\\n\\n\" );\n\t}\n\n\tif ( lbn_type == true )\n\t{\n\t\tprintf( \" NUDES file (xxx.nud or xxx.n)\\n\" );\n\t\tprintf( \" - full filename (xxx.nud or xxx.n)\\n\" );\n\t\tprintf( \" LBN file (yyy.lbn)\\n\" );\n\t\tprintf( \" - root portion (yyy) of filename\\n\" );\n\t\tprintf( \" (interprets staves 1 and 2 with figure tracking)\\n\" );\n\t\tprintf( \" - full filename (yyy.lbn)\\n\" );\n\t\tprintf( \" (choice of staves, choice of tracking)\\n\\n\" );\n\t\tprintf( \" Filename: \" );\n\t}\n\n\n\tif ( lbn_type == false )\n\t{\n\t\tprintf( \" Filename: \" );\n\t}\n} /* get_filesa */\n/************************************************/\n\nvoid get_files ( char file[] )\n/*\n called by main\n\tcalls get_filesa, strcmpend, bell, add_id_num,\n*/\n{\n\tint c;\n\tint i;\n\tint len;\n\tint last;\n\tint err_count;\n\tint error;\n\tint loc_dot;\n\tint from_ini;\n\tchar key;\n\tbool get_out;\n\tbool ini_ok;\n\tbool file_ok;\n\tbool dir_ok;\n\tbool lbn_type;\n\tchar dir[BMAX];\n\n\tfrom_ini = 0;\n\terr_count = 0;\n\terror = 0;\n\tget_out = false;\n\tini_ok = false;\n\tfile_ok = false;\n\tdir_ok = false;\n\tlbn_type = true;\nstart:\n\terr_count = err_count + 1;\n\tif ( err_count >= 25 ) \n\t{\n\t\tprintf( \" Limit: tried %d times for input file %s\\n\",\n\t\t\terr_count,name );\n\t\tok = -1;\n\t\treturn;\n\t}\n\tinput_file_type = -1;\n\tfor ( c = 0; c < BMAX; ++c )\n\t{\n\t\tname[c] = NULL;\n\t\tfinname[c] = NULL;\n\t\tnudesname[c] = NULL;\n\t}\n\n\tif ( file == NULL )\n\t{\n\t\tfile_ok = false;\n\t\tif ( from_ini == 0 )\n\t\t{\n\t\t\tif ( get_if_ini () == true )\n\t\t\t{\n\t\t\t\tini_ok = get_ini_bool ( \"input_file_default\" );\n\t\t\t\tif ( ini_ok == true ) \n\t\t\t\t{\n\t\t\t\t\tfile_ok = get_ini_str ( \"input_file_name\", name );\n\t\t\t\t\tdir_ok = get_ini_str ( \"input_file_dir\", dir );\n\t\t\t\t\tif ( dir[0] == NULL ) dir_ok = false;\n\t\t\t\t\tlen = (int)strlen( dir 
);\n\t\t\t\t\tif ( dir_ok == true && dir[len - 1] != '\\\\' )\n\t\t\t\t\t\tdir[len - 1] = '\\\\';\n\t\t\t\t\tlbn_type = get_ini_bool ( \"lbn_file_encoded\" );\n\t\t\t\t\tfrom_ini = 1;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif ( file_ok == false )\n\t\t{\n\t\t\tname[0] = NULL;\n\t\t\tget_filesa ( lbn_type, error );\n\t\t\tif ( gets ( name ) != NULL )\n\t\t\t{\n\t\t\t\tlen = (int)strlen( name );\n\t\t\t\tif ( len == 0 )\n\t\t\t\t{\n\t\t\t\t\tget_out = true;\n\t\t\t\t\terror = 1;\n\t\t\t\t\tgoto start; \n\t\t\t\t}\n\t\t\t}\n\t\t\telse\n\t\t\t{\n\t\t\t\tget_out = true;\n\t\t\t\terror = 1;\n\t\t\t\tgoto start; \n\t\t\t}\n\t\t}\n\t}\n\telse\n\t{\n\t\tstrcpy ( name, file );\n\t}\n\n\tlen = (int)strlen( name );\n\tlast = len - 1;\n\n\tloc_dot = -1;\n\ti = -1;\n\tdo\n\t{\n\t\ti = i + 1;\n\t\tkey = name[i];\n\t\tif ( key == '.' ) loc_dot = i;\n\t} while ( key != NULL );\n\n\tif ( loc_dot >= 0 ) loc_dot = last - loc_dot;\n\n\tinput_file_type = -1;\n\thaslbn = FALSE;\n\tget_out = false;\n\n\tif ( lbn_type == true ) // use filename to decide lbn type\n\t{\n\t\tswitch ( loc_dot )\n\t\t{\n\t\tcase 3:\n\t\t\t// .nud extention\n\t\t\tif ( strcmpend ( name, \".nud\" ) ) \n\t\t\t{\n\t\t\t\tinput_file_type = 0;\n\t\t\t\thaslbn = FALSE;\n\t\t\t}\n\t\t\t// .lbn extention\n\t\t\tif ( strcmpend ( name, \".lbn\" ) )\n\t\t\t{\n\t\t\t\tinput_file_type = 1;\n\t\t\t\thaslbn = TRUE;\n\t\t\t}\n\t\t\tif ( input_file_type < 0 ) get_out = true;\n\t\t\tbreak;\n\t\tcase 2:\n\t\t\t// problem\n\t\t\tget_out = true;\n\t\t\tbreak;\n\t\tcase 1:\n\t\t\t// .n extention\n\t\t\tif ( strcmpend ( name, \".n\" ) )\n\t\t\t{\n\t\t\t\tinput_file_type = 0;\n\t\t\t\thaslbn = FALSE;\n\t\t\t}\n\t\t\telse\n\t\t\t{\n\t\t\t\tget_out = true;\n\t\t\t}\n\t\t\tbreak;\n\t\tcase 0:\n\t\t\t// . extention\n\t\t\tif ( strcmpend ( name, \".\" ) )\n\t\t\t{\n\t\t\t\tinput_file_type = 2;\n\t\t\t\tstrcat( name, \"lbn\" );\n\t\t\t\thaslbn = TRUE;\n\t\t\t}\n\t\t\telse\n\t\t\t{\n\t\t\t\tget_out = true;\n\t\t\t}\n\t\t\tbreak;\n\t\tcase -1:\n\t\t\t// no extention\n\t\t\tif ( len > 0 && !strcmpend ( name, \".\" ) )\n\t\t\t{\n\t\t\t\tinput_file_type = 2;\n\t\t\t\tstrcat( name, \".lbn\" );\n\t\t\t\thaslbn = TRUE;\n\t\t\t}\n\t\t\telse\n\t\t\t{\n\t\t\t\tget_out = true;\n\t\t\t}\n\t\t\tbreak;\n\t\tdefault:\n\t\t\tget_out = true;\n\t\t\tbreak;\n\t\t}\n\t}\n\tif ( get_out == true )\n\t{\n\t\tif ( from_ini == 1 ) \n\t\t{\n\t\t\tprintf(\"\\n\\nFile: %s from lintel.ini is not available.\\n\",\n\t\t\t\tname );\n\t\t\tfrom_ini = -1;\n\t\t}\n\t\terror = 1;\n\t\tname[0] = NULL;\n\t\tgoto start;\n\t}\n\n\t// add directory to filename\n\tif ( dir_ok == true )\n\t{\n\t\tstrcat ( dir, name );\n\t\tstrcpy ( name, dir );\n\t}\n\tprintf( \"\\n \" );\n\tif ( input_file_type == 0 )\n\t{\n\t\tsprintf ( nudesname, \"%s\", name );\n\t\tif ( ( infile = fopen( nudesname, \"r\" ) ) == NULL )\n\t\t{\n\t\t\tif ( infile ) fclose ( infile );\n\t\t\tprintf ( \"\\n\\n %s OOPS?\\n\", nudesname );\n\t\t\tbell ( 1, 1 );\n\t\t\tif ( from_ini == 1 ) \n\t\t\t{\n\t\t\t\tfrom_ini = -1;\n\t\t\t}\n\t\t\tgoto start;\n\t\t}\n\t\tprintf ( \" Opened %s\\n\", nudesname );\n\t}\n\telse if ( input_file_type > 0 )\n\t{\n\t\tstrcpy( finname, name );\n\n\t\tif ( ( infile = fopen ( finname, \"r\" ) ) == NULL )\n\t\t{\n\t\t\tif ( infile ) fclose ( infile );\n\t\t\tprintf ( \"\\n %s ? 
OOPS - file not found.\\n\", finname );\n\t\t\tbell ( 1, 1 );\n\t\t\tif ( from_ini == 1 ) \n\t\t\t{\n\t\t\t\tprintf( \"\\n\\n File: %s from lintel.ini is not available.\\n\", name );\n\t\t\t\tfrom_ini = -1;\n\t\t\t}\n\t\t\tgoto start;\n\t\t}\n\n\t\tprintf ( \"\\n opened input file %s\\n\", finname );\n\n\t\tadd_id_num ( name, nudesname, \".n\" );\n\t\tif ( ( nudesfile = fopen ( nudesname, \"w\" ) ) == NULL )\n\t\t{\n\t\t\tif ( nudesfile ) fclose ( nudesfile );\n\t\t\tprintf ( \"\\n\\n %s OOPS?\\n\", nudesname );\n\t\t\tbell ( 1, 1 );\n\t\t\tgoto start;\n\t\t}\n\t\tprintf ( \"\\n created nudes file %s\\n\", nudesname );\n\t}\n\tif ( ( infile = fopen(nudesname, \"r\" ) ) == NULL )\n\t{\n\t\tif ( infile ) fclose ( infile );\n\t\tprintf ( \"\\n\\n %s OOPS?\\n\", nudesname );\n\t\tbell ( 1, 1 );\n\t\tgoto start;\n\t}\n} /* get_files */\n/************************************************/\n\nbool led_opena ( int min_fps, int max_fps, int min_beats, int max_beats )\n{\n\tbool get_out;\n\tget_out = true;\n\tif ( lbn_fps < min_fps || lbn_fps > max_fps )\n\t{\n\t\tprintf ( \"\\n Oops: fps value is %d but must be between %d and %d\\n\", lbn_fps, min_fps, max_fps );\n\t\tget_out = false;\n\t}\n\tif ( lbn_bpm < min_beats || lbn_bpm > max_beats )\n\t{\n\t\tif ( lbn_bpm < 0 )\n\t\t{\n\t\t\tprintf ( \"\\n Oops: bpm value missing\\n\" );\n\t\t}\n\t\telse\n\t\t{\n\t\t\tprintf ( \"\\n Oops: bpm value is %d but must be between %d and %d\\n\", lbn_bpm, min_beats, max_beats );\n\t\t}\n\t\tget_out = false;\n\t}\n\treturn( get_out );\n} /* led_opena */\n/********************************************/\n\nvoid led_param ( void )\n/*\n set up parameters of .lbn interpretation from .ini file\n\n called by main\n*/\n{\n\tbool get_out;\n\tint min_fps;\n\tint max_fps;\n\tint min_beats;\n\tint max_beats;\n\tint lbn_figures_in;\n\tbool lbn_default;\n\tint lbn_fps_in;\n\tint lbn_bpm_in;\n\n\tlbn_fps = -1;\n\tlbn_bpm = -1;\n\tlbn_ppb = 23;\n\tmin_fps = 1;\n\tmax_fps = 250;\n\tmin_beats = 25;\n\tmax_beats = 250;\t\n\tlbn_default = false;\n\tlbn_figures = 1;\n\tif ( get_if_ini () == true )\n\t{\n\t\tlbn_figures_in = get_ini_int ( \"lbn_figures\" );\n\t\tlbn_default = get_ini_bool ( \"lbn_default\" );\n\t\tlbn_fps_in = get_ini_int ( \"lbn_fps\" );\n\t\tlbn_bpm_in = get_ini_int ( \"lbn_bpm\" );\n\n\t\tif ( lbn_fps_in < min_fps || lbn_fps_in > max_fps \n\t\t\t|| lbn_bpm_in < min_beats || lbn_bpm_in > max_beats )\n\t\t\t\tlbn_default = false;\n\t\tif ( lbn_default == true )\n\t\t{\n\t\t\tlbn_fps = lbn_fps_in;\n\t\t\tlbn_bpm = lbn_bpm_in;\n\t\t\tlbn_figures = lbn_figures_in;\n\t\t}\n\t}\n\n\tif ( lbn_default == false )\n\t{\n\t\tget_out = false;\n\t\tdo\n\t\t{\n\t\t\tprintf ( \"\\n Please enter frames/second (%d-%d)\", min_fps, max_fps );\n\t\t\tprintf ( \"\\n and beats/minute (%d-%d)\", min_beats, max_beats );\n\t\t\tprintf ( \"\\n separated by a space\\n :\" );\n\t\t\tif ( gets ( buf ) != NULL && buf[0] != 0 )\n\t\t\t{\n\t\t\t\tsscanf ( buf, \"%d %d\", &lbn_fps, &lbn_bpm );\n\t\t\t\tget_out = led_opena ( min_fps, max_fps, min_beats, max_beats);\n\t\t\t}\n\t\t\telse\n\t\t\t{\n\t\t\t\tlbn_fps = 25;\n\t\t\t\tlbn_bpm = 120;\n\t\t\t\tprintf ( \"\\n Oops: cannot read fps and bpm\" );\n\t\t\t\tprintf ( \"\\n values set to %d and %d respectively\\n\",\n\t\t\t\t\tlbn_fps, lbn_bpm );\n\t\t\t\tget_out = true;\n\t\t\t}\n\t\t} while ( get_out == false );\n\t}\n\tlbn_fpp = double(lbn_fps)*doub60\n\t\t\t\t\t/ (double(lbn_bpm)*double(lbn_ppb));\n\tprintf(\"\\n frames/pixel %g, fps %d, bpm %d, ppb %d\\n\",\n\t\t\t\tlbn_fpp, lbn_fps, lbn_bpm, 
lbn_ppb );\n\tprintf(\" number of figures %d\\n\",lbn_figures);\n}/* led_param */\n/************************************************/\n\n\n\nint main(int argc, char* argv[])\n/*\n calls initialise, lgetfiles, linter, openfile, compl,\n doframes, initsphere, initgraphics,\n checkeys, image, animate, getout,\n help, gluInit, glutKeyboardFunc, glutDisplayFunc,\n glutIdleFunc, glutMainLoop,\n\t\t\tget_ini, get_files, led_param\n*/\n{\n\tsprintf(ptitle,\"lintel084\");\n\tprintf(\"\\n %s running\\n\",ptitle);\n\nmore:\n\tinitialise();\n\tget_ini ( 0 );\n\tled_param();\n\tget_files ( NULL );\n\tif ( ok != 0 ) goto more;\n\tif (haslbn == TRUE)\n\t{\n fprintf(nudesfile,\n \"*\\n* created %s from %s using %s\\n*\\n\",\n nudesname,name,ptitle);\n linter();\n\t}\n\t/*\n\tfstart = 0;\n\tif (ok == 0) openfile(); \n\t else \n\t if (ok != 1) getout(1);\n\t else goto more;\n\tcompl1();\n\tif (ok == 0) doframes();\n else \n if (ok != 1) getout(1);\n else goto more;\n\tif (ok == 0)\n\t initsphere();\n else \n if (ok != 1) getout(1);\n else goto more;\n\tglutInit(&argc, argv); \n\tinitgraphics(); \n\tprintf(\"For interactive command list:\\n\");\n\tprintf(\" click in animation window, then press 'h' key\\n\");\n\tglutKeyboardFunc(checkeys); // register Keyboard handler \n\tglutDisplayFunc(image); // register Display handler \n\tglutIdleFunc(animate);\n\tglutMainLoop();\n\tgoto more;*/\n}\n\n\n\n" }, { "alpha_fraction": 0.31831830739974976, "alphanum_fraction": 0.3588588535785675, "avg_line_length": 13.800000190734863, "blob_id": "a53995bb0aff6171e6748208a23ab4398e8e34c2", "content_id": "6774820df8c9e1c6481635d8da88423ad270edbe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 666, "license_type": "no_license", "max_line_length": 61, "num_lines": 45, "path": "/docs/bfs-dfs.py", "repo_name": "csmagic/pucsd-iop", "src_encoding": "UTF-8", "text": "def dfs(t):\n (val, tl) = t\n if tl == []:\n return [val]\n else :\n return [val] + [x for ll in map (dfs, tl)for x in ll]\n\n\n \ndef bfs(t):\n r= []\n q = [t]\n while(q!=[]):\n (x,ctl) = q[0]\n q = q[1:]+ctl\n r = r +[x]\n return r\n\n\ndef dfsi(t):\n r = []\n s = [t]\n while(s!=[]):\n (x,ctl) = s[0]\n s = ctl+s[1:]\n r = r +[x]\n return r\n\ndef bfsp(t):\n return bfs1([t],[])\n\ndef bfs1(q,r):\n if (q==[]):\n return r\n else:\n (hd, cl) = q[0]\n tl = q[1:]\n return bfs1(tl+cl, r+[hd])\n \nt2= (2,[])\nt4= (4,[])\nt5= (5,[t2])\nt6= (6,[])\nt3= (3,[t5,t6])\nt1 =(1,[t2, t3,t4])\n" }, { "alpha_fraction": 0.3745020031929016, "alphanum_fraction": 0.45418328046798706, "avg_line_length": 15.666666984558105, "blob_id": "a5568ed5bc16c1f9d201c3c4a101f9ff5c2ac8b4", "content_id": "3045c9dc61fc4d62650cd4f0bdbf826d6db707c7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 251, "license_type": "no_license", "max_line_length": 58, "num_lines": 15, "path": "/14125/treeTriversal.py", "repo_name": "csmagic/pucsd-iop", "src_encoding": "UTF-8", "text": "\ndef dfs(tree):\n (val,tl)=tree\n if tl == []:\n return [val]\n else:\n return [val]+ [x for y in map (dfs,tl) for x in y]\n\nt2= (2,[])\nt4= (4,[])\nt5= (5,[t2])\nt6= (6,[])\nt3= (3,[t5,t6])\nt1 =(1,[t2, t3,t4])\nprint (dfs(t1))\nprint (t1)\n" }, { "alpha_fraction": 0.6393442749977112, "alphanum_fraction": 0.7213114500045776, "avg_line_length": 19.33333396911621, "blob_id": "460235deb3540ddef0bca23288138d8d7393cc06", "content_id": "592ecce9246870fcf5c03714d51c75793bdbd61c", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Markdown", "length_bytes": 61, "license_type": "no_license", "max_line_length": 41, "num_lines": 3, "path": "/15127/README.md", "repo_name": "csmagic/pucsd-iop", "src_encoding": "UTF-8", "text": "# Dnyaneshwar Ishwar Mahajan :- 15127\n\nThis folder contain all assignment codes.\n" }, { "alpha_fraction": 0.7068965435028076, "alphanum_fraction": 0.7758620977401733, "avg_line_length": 18.33333396911621, "blob_id": "73efb8e228135d46c5b643b113e885225ff6cb76", "content_id": "d644b99f7f28a5fbe1a9a0633c7b3696b4c5feae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 58, "license_type": "no_license", "max_line_length": 48, "num_lines": 3, "path": "/15104/README.md", "repo_name": "csmagic/pucsd-iop", "src_encoding": "UTF-8", "text": "#Bhand Vishnu\n\nThis Directory contains assignments of IOP-2017.\n" }, { "alpha_fraction": 0.807692289352417, "alphanum_fraction": 0.807692289352417, "avg_line_length": 25, "blob_id": "551da5644a9b1dc2797e6d4b1bb30c40a9829049", "content_id": "03b74c8b9a79c79764ac45a81fa44dd9709d9442", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 52, "license_type": "no_license", "max_line_length": 39, "num_lines": 2, "path": "/README.md", "repo_name": "csmagic/pucsd-iop", "src_encoding": "UTF-8", "text": "# pucsd-iop\npucsd Idioms-of-Programing code sharing\n" } ]
10
Jean-Sim/linear-algbra-collection
https://github.com/Jean-Sim/linear-algbra-collection
89ec63ef7127246357f5c5edabe1067a77990e0c
425345d27e261e1041ad251b80fb0b5ef2d235a0
edcdcf0e0d7dba648194bb15bb79419492ee00e3
refs/heads/master
2021-08-07T23:32:59.593423
2021-07-12T18:32:46
2021-07-12T18:32:46
231,160,979
9
3
null
2020-01-01T00:45:15
2020-09-12T19:34:33
2020-09-18T16:30:59
Python
[ { "alpha_fraction": 0.5507246255874634, "alphanum_fraction": 0.5688405632972717, "avg_line_length": 30.47058868408203, "blob_id": "d80e55c7c5852e2d132fb9e70e7efd127f7aa292", "content_id": "1c9c491a0af2e98a3bc59318f6f9f22ab6331def", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 552, "license_type": "no_license", "max_line_length": 67, "num_lines": 17, "path": "/web-app/web-app-modules/matrix_matrix_multiplikation.py", "repo_name": "Jean-Sim/linear-algbra-collection", "src_encoding": "UTF-8", "text": "def MM_multiplication(vec):\r\n #needs square matrix\r\n from content.modules_for_Web_app.web_app_tool import final_step\r\n matrix_1, matrix_2 = final_step(vec)\r\n\r\n end_matrix = []\r\n for element in range(len(matrix_1)):\r\n end_matrix.append([])\r\n for none in matrix_1:\r\n end_matrix[element].append(0)\r\n\r\n for i in range(len(matrix_1)):\r\n for j in range(len(matrix_1)):\r\n for k in range(len(matrix_1)):\r\n end_matrix[i][j] += matrix_1[i][k] * matrix_2[k][j]\r\n\r\n return end_matrix\r\n" }, { "alpha_fraction": 0.4645760655403137, "alphanum_fraction": 0.47386759519577026, "avg_line_length": 31.038461685180664, "blob_id": "d4a59a97d0fcbd650b1a7a54baa7b6e56ce292e9", "content_id": "d6ca7343e3384fcb325aedab75f8393a3b8c2621", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1722, "license_type": "no_license", "max_line_length": 114, "num_lines": 52, "path": "/individual modules/determinant.py", "repo_name": "Jean-Sim/linear-algbra-collection", "src_encoding": "UTF-8", "text": "\r\ndef determinant(M):\r\n def normalise(M):\r\n new_M = []\r\n # creats a new Matrix\r\n for element in range(len(M)):\r\n new_M.append([])\r\n # appand new sub vectore \r\n for element2 in range(len(M)):\r\n # inserts the element in correct position \r\n new_M[element].append(M[element2][element])\r\n return new_M\r\n # changes Matrix in to the right shape \r\n\r\n def rowMod(M, i, j, x):\r\n M[i] = [a + x * b for a, b in zip(M[i], M[j])]\r\n # modefies matrix element \r\n\r\n def rowEchelon(M):\r\n M = normalise(M)\r\n # uses noramlise to make matrix fit the algorithem \r\n row, col = 0, 0\r\n rows, cols = len(M), len(M[0])\r\n # defines size of matrix \r\n while row < rows and col < cols:\r\n # starts while loop witch continues until matrix in echelon form \r\n if M[row][col] == 0:\r\n for r in range(row + 1, rows):\r\n if M[r][col] != 0:\r\n rowMod(M, row, r, 1)\r\n break\r\n\r\n if M[row][col] == 0:\r\n col += 1\r\n continue\r\n pivot = M[row][col]\r\n\r\n for r in range(row + 1, rows):\r\n if M[r][col] != 0:\r\n rowMod(M, r, row, -M[r][col] / pivot)\r\n\r\n row += 1\r\n col += 1\r\n\r\n return normalise(M)\r\n #to compute the determinant we just bring the matrix in to echelon form and then multiply all diagunal values \r\n\r\n num = 1\r\n new_M = rowEchelon(M)\r\n for element in range(len(M)):\r\n num = num*new_M[element][element]\r\n # multiplies elements diagonally\r\n return num\r\n\r\n" }, { "alpha_fraction": 0.4749498963356018, "alphanum_fraction": 0.5511022210121155, "avg_line_length": 36.230770111083984, "blob_id": "bfa85b60884181b905024d36847caac49bf1ef50", "content_id": "466be0bcbe63b16fde43e7b687a7c779ef55130a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 499, "license_type": "no_license", "max_line_length": 79, "num_lines": 13, "path": "/web-app/web-app-modules/eigenvalue_for_2x2.py", "repo_name": 
"Jean-Sim/linear-algbra-collection", "src_encoding": "UTF-8", "text": "def eigenvalue_2x2(matrix):\r\n from content.modules_for_Web_app.web_app_tool import string_matrix\r\n matrix = string_matrix(matrix)\r\n\r\n import math\r\n\r\n result1 = (matrix[0][0] + matrix[1][1]) / 2 + math.sqrt(\r\n ((matrix[0][0] + matrix[1][1]) / 2) ** 2 - matrix[0][0] * matrix[1][1])\r\n\r\n result2 = (matrix[0][0] + matrix[1][1]) / 2 - math.sqrt(\r\n ((matrix[0][0] + matrix[1][1]) / 2) ** 2 - matrix[0][0] * matrix[1][1])\r\n\r\n return round(result1, 5), round(result2, 5)\r\n\r\n" }, { "alpha_fraction": 0.6288951635360718, "alphanum_fraction": 0.6288951635360718, "avg_line_length": 34.29999923706055, "blob_id": "806b4a633337058fe0ddf269a44cf6666b990ced", "content_id": "02ca893607e284c59b90e1b533a49736954b79ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 353, "license_type": "no_license", "max_line_length": 57, "num_lines": 10, "path": "/web-app/str_vec.py", "repo_name": "Jean-Sim/linear-algbra-collection", "src_encoding": "UTF-8", "text": "def string_vector(lis):\n lis = lis.strip(\"[\")\n lis = lis.strip(\"]\")\n # removes all unnecessary brackets from string vector\n lis = lis.split(\",\")\n # splits the vector in string form into an array \n for element in range(len(lis)):\n lis[element] = float(lis[element])\n # turns the number string in to numbers\n return lis\n" }, { "alpha_fraction": 0.4478584825992584, "alphanum_fraction": 0.4525139629840851, "avg_line_length": 34.82758712768555, "blob_id": "e3a773cc068f5c6cc53fbf817963e9b92e74c8e9", "content_id": "765a92ef604cdee924db78d41f0c1cbf1a406488", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1074, "license_type": "no_license", "max_line_length": 79, "num_lines": 29, "path": "/web-app/web-app-modules/solve_eq.py", "repo_name": "Jean-Sim/linear-algbra-collection", "src_encoding": "UTF-8", "text": "def solve_eq(inp):\r\n from content.modules_for_Web_app.web_app_tool import final_step\r\n result, matrix = final_step(inp)\r\n def abs(lis):\r\n if lis < 0:\r\n lis = lis*-1\r\n return lis\r\n\r\n for k in range(len(result)):\r\n if abs(matrix[k][k]) < 0:\r\n for i in range(k+1, len(result)):\r\n if abs(matrix[i][k]) > abs(matrix[k][k]):\r\n for j in range(k,len(result)):\r\n matrix[k][j], matrix[i][j] = matrix[i][j], matrix[k][j]\r\n result[k], result[i] = result[i], result[k]\r\n break\r\n pivot = matrix[k][k]\r\n for j in range(k, len(result)):\r\n matrix[k][j] /= pivot\r\n result[k] /= pivot\r\n\r\n for i in range(len(result)):\r\n if i == k or matrix[i][k] == 0:\r\n continue\r\n factore = matrix[i][k]\r\n for j in range(k, len(result)):\r\n matrix[i][j] -= factore * matrix[k][j]\r\n result[i] -= factore*result[k]\r\n return matrix, result\r\n\r\n\r\n\r\n" }, { "alpha_fraction": 0.5759312510490417, "alphanum_fraction": 0.5988538861274719, "avg_line_length": 32.70000076293945, "blob_id": "2adefd64146cc17427804927f1018525a6c5280a", "content_id": "0b4d55f5bb2cc0a9aa0f12fb0058c16739e2a047", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 349, "license_type": "no_license", "max_line_length": 50, "num_lines": 10, "path": "/individual modules/vector_addition.py", "repo_name": "Jean-Sim/linear-algbra-collection", "src_encoding": "UTF-8", "text": "def vector_addition(vector_1, vector_2):\r\n for element in range(len(vector_2)):\r\n # goes through all elements in Vector\r\n try:\r\n 
vector_1[element] += vector_2[element]\r\n        except IndexError:\r\n            vector_1.append(vector_2[element])\r\n            # handles vectors of different lengths by appending the extra elements\r\n\r\n    return vector_1\r\n\r\n" }, { "alpha_fraction": 0.6189427375793457, "alphanum_fraction": 0.6365638971328735, "avg_line_length": 20.619047164916992, "blob_id": "a4796f873a58a1b191ebdb292daeae9c3ca04544", "content_id": "986fda9dd32cb5227e4b8f5c97f028399188b1fa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 454, "license_type": "no_license", "max_line_length": 44, "num_lines": 21, "path": "/Lorentz_factor/generate_lorenz_img.py", "repo_name": "Jean-Sim/linear-algbra-collection", "src_encoding": "UTF-8", "text": "from generate_lorenz import lorenz\nimport matplotlib.pyplot as plt\n\nc = 299792458\n# speed of light, matching the constant in generate_lorenz\n\n\ndef generate_img (lor):\n    quo = 10001\n    # amount of data points on graph\n\n    val = []\n    for spe in range(1, quo):\n        val.append(lorenz((c/quo)*spe))\n\n    x = []\n    for element in range(1, quo):\n        x.append((1/quo)*element)\n    # generates x and y values\n \n    plt.plot(x, val)\n    plt.plot([lor/c], [lorenz(lor)], 'ro')\n    # plots the curve and marks the input speed as a red data point\n    plt.show()\n" }, { "alpha_fraction": 0.5407209396362305, "alphanum_fraction": 0.5607476830482483, "avg_line_length": 26.884614944458008, "blob_id": "05fe44510b5e5a120743a63977e45cffd47dda90", "content_id": "12209a2496127f895be63f14f015eb6427055868", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 749, "license_type": "no_license", "max_line_length": 107, "num_lines": 26, "path": "/web-app/web-app-modules/angle_two_vectors.py", "repo_name": "Jean-Sim/linear-algbra-collection", "src_encoding": "UTF-8", "text": "def angle(vec):\r\n    import math\r\n    from content.modules_for_Web_app.web_app_tool import final_step\r\n    vector_1, vector_2 = final_step(vec)\r\n\r\n    def dot_product(vector_1, vector_2):\r\n        dot_product = 0\r\n        for index in range(len(vector_1)):\r\n            try:\r\n                dot_product += vector_1[index] * vector_2[index]\r\n            except IndexError:\r\n                \"\"\r\n        return dot_product\r\n\r\n    def absolute_value(vector):\r\n        import math\r\n        val = 0\r\n\r\n        for element in vector:\r\n            val += element ** 2\r\n\r\n        return math.sqrt(val)\r\n\r\n    result = math.acos(dot_product(vector_2, vector_1)/(absolute_value(vector_2)*absolute_value(vector_1)))\r\n\r\n    return round(math.degrees(result), 5)" }, { "alpha_fraction": 0.5980197787284851, "alphanum_fraction": 0.6099010109901428, "avg_line_length": 36.846153259277344, "blob_id": "90701b905f91a1137e55bf58b8fa6e9f3e875b45", "content_id": "d108bc92dd8232b767cc182c2b2ebf43173983fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 505, "license_type": "no_license", "max_line_length": 71, "num_lines": 13, "path": "/individual modules/dot_product.py", "repo_name": "Jean-Sim/linear-algbra-collection", "src_encoding": "UTF-8", "text": "def dot_product(vector_1, vector_2):\r\n    dot_product = 0\r\n    # defines end variable \r\n    for index in range(len(vector_1)):\r\n        # goes through each element in the two vectors \r\n        try:\r\n            dot_product += vector_1[index] * vector_2[index]\r\n            # multiplies them then adds them to dot_product\r\n        except IndexError:\r\n            \"incorrect dimensions please check input\"\r\n            # the bare string is a no-op, so incorrect input is silently ignored\r\n\r\n    return dot_product\r\n" }, { "alpha_fraction": 0.626334547996521, "alphanum_fraction": 0.6548042893409729, "avg_line_length": 33.125, "blob_id":
"65d638a004f0ed246d167c9c809893a565581373", "content_id": "e0de285a6844cce15d491bc262360fd2e5fe9aff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 281, "license_type": "no_license", "max_line_length": 72, "num_lines": 8, "path": "/web-app/web-app-modules/finde_position_vector.py", "repo_name": "Jean-Sim/linear-algbra-collection", "src_encoding": "UTF-8", "text": "def finde_position_vector(vec):\r\n from content.modules_for_Web_app.web_app_tool import final_step\r\n point_1, point_2 = final_step(vec)\r\n\r\n for element in range(len(point_1)):\r\n point_2[element] = round(point_2[element] - point_1[element], 5)\r\n\r\n return point_2\r\n" }, { "alpha_fraction": 0.5185676217079163, "alphanum_fraction": 0.5198938846588135, "avg_line_length": 32.181819915771484, "blob_id": "a25840b0044dc159f5bf4e2a0c7703ed8b7c18dd", "content_id": "c3a3b0bc8d94b7cdfe3367337cc7d4624c3146ac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 754, "license_type": "no_license", "max_line_length": 87, "num_lines": 22, "path": "/web-app/web-app-modules/translation.py", "repo_name": "Jean-Sim/linear-algbra-collection", "src_encoding": "UTF-8", "text": "def translation(inp):\r\n from content.modules_for_Web_app.web_app_tool import final_step\r\n vector, matrix = final_step(inp)\r\n def VM_multiplication(vector, matrix):\r\n for element in range(len(vector)):\r\n try:\r\n for collum in range(len(matrix[element])):\r\n matrix[element][collum] = matrix[element][collum] * vector[element]\r\n except IndexError:\r\n \"\"\r\n\r\n for new in range(len(vector)):\r\n vector[new] = 0\r\n try:\r\n for col in range(len(matrix)):\r\n vector[new] += matrix[col][new]\r\n except IndexError:\r\n \"\"\r\n\r\n return vector\r\n\r\n return VM_multiplication(vector, matrix)\r\n\r\n" }, { "alpha_fraction": 0.5308924317359924, "alphanum_fraction": 0.6109839677810669, "avg_line_length": 37.90909194946289, "blob_id": "6c02259705f72b3dad201550a23a79723f6a9eb3", "content_id": "4416eb8156b6553f131eb7c2b6a6a4aadbdb4f31", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 437, "license_type": "no_license", "max_line_length": 80, "num_lines": 11, "path": "/web-app/web-app-modules/cross_product.py", "repo_name": "Jean-Sim/linear-algbra-collection", "src_encoding": "UTF-8", "text": "def cross_product(vec):\r\n from content.modules_for_Web_app.web_app_tool import final_step\r\n vector_1, vector_2 = final_step(vec)\r\n\r\n new_vector = [1, 1, 1]\r\n\r\n new_vector[0] *= round(vector_1[1]*vector_2[2] - vector_1[2]*vector_2[1], 5)\r\n new_vector[1] *= round(vector_1[2]*vector_2[0] - vector_1[0]*vector_2[2], 5)\r\n new_vector[2] *= round(vector_1[0]*vector_2[1] - vector_1[1]*vector_2[0], 5)\r\n\r\n return new_vector" }, { "alpha_fraction": 0.43457943201065063, "alphanum_fraction": 0.5186915993690491, "avg_line_length": 33.5, "blob_id": "05e5e4a44ee3dd614135e53e0388a7e5904ac52c", "content_id": "adb113b173a3848c3a8985d1e5ea2616067b20e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 428, "license_type": "no_license", "max_line_length": 79, "num_lines": 12, "path": "/individual modules/eigenvalue_for_2x2.py", "repo_name": "Jean-Sim/linear-algbra-collection", "src_encoding": "UTF-8", "text": "def eigenvalue_2x2(matrix):\r\n import math\r\n\r\n # solves quadratic eq\r\n result1 = (matrix[0][0] + matrix[1][1]) / 2 + math.sqrt(\r\n 
((matrix[0][0] + matrix[1][1]) / 2) ** 2 - matrix[0][0] * matrix[1][1])\r\n\r\n    result2 = (matrix[0][0] + matrix[1][1]) / 2 - math.sqrt(\r\n        ((matrix[0][0] + matrix[1][1]) / 2) ** 2 - matrix[0][0] * matrix[1][1])\r\n\r\n    # returns two eigenvalues \r\n    return result1, result2\r\n\r\n" }, { "alpha_fraction": 0.5723720192909241, "alphanum_fraction": 0.5811777710914612, "avg_line_length": 30.454545974731445, "blob_id": "308b8e1fc1eefc684b4785193645bc8d9e953fa8", "content_id": "d444019814fe1673680750f2ba96137ed22e855f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1817, "license_type": "no_license", "max_line_length": 76, "num_lines": 55, "path": "/web-app/web_app_tool.py", "repo_name": "Jean-Sim/linear-algbra-collection", "src_encoding": "UTF-8", "text": "\"\"\"\r\nto declare a matrix use those () please separate the elements inside by ;\r\nto declare a vector use those [] separated by ,\r\nto declare an element inside a vector use those , to separate each number\r\nto separate different inputs use |\r\nexample: [1,2]|([3,0];[0,3])\r\n\"\"\"\r\n\r\ndef string_vector(lis):\r\n    lis = lis.strip(\"[\")\r\n    lis = lis.strip(\"]\")\r\n    # removes all unnecessary brackets from string vector\r\n    lis = lis.split(\",\")\r\n    # splits the vector in string form into an array \r\n    for element in range(len(lis)):\r\n        lis[element] = float(lis[element])\r\n        # turns the number strings into numbers\r\n    return lis\r\n\r\ndef string_matrix(mis):\r\n    mis = mis.strip(\"(\")\r\n    mis = mis.strip(\")\")\r\n    # removes the unnecessary elements from the string\r\n    mis = mis.split(\";\")\r\n    # splits it into individual vector parts\r\n    for element in range(len(mis)):\r\n        mis[element] = string_vector(mis[element])\r\n        # turns the string matrix into a list matrix\r\n    return mis\r\n\r\ndef final_step(st):\r\n    st = st.split(\"|\")\r\n    new_st = []\r\n    # defines new list and splits the two inputs\r\n\r\n    if \"([\" in st[0]:\r\n        new_st.append(string_matrix(st[0]))\r\n    elif \"[\" in st[0]:\r\n        new_st.append(string_vector(st[0]))\r\n    else:\r\n        new_st.append(float(st[0]))\r\n    # checks whether it is matrix, vector or num\r\n\r\n    if \"([\" in st[1]:\r\n        new_st.append(string_matrix(st[1]))\r\n    elif \"[\" in st[1]:\r\n        if type(new_st[0]) == float:\r\n            new_st.append(string_vector(st[1]))\r\n        else:\r\n            new_st.insert(0, string_vector(st[1]))\r\n    else:\r\n        new_st.insert(0, float(st[1]))\r\n    # checks whether it is matrix, vector or num\r\n    # and inserts them in the correct position so that the order is num, vector, matrix\r\n\r\n    return new_st[0], new_st[1]\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n" }, { "alpha_fraction": 0.5546218752861023, "alphanum_fraction": 0.5742297172546387, "avg_line_length": 27.799999237060547, "blob_id": "3967988b10b829cb14d5d2f9850fd8e2c4cd1744", "content_id": "1eda3701b7027544aeb05acdb8fb75db168b9e42", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 447, "license_type": "no_license", "max_line_length": 75, "num_lines": 15, "path": "/individual modules/mid_vector.py", "repo_name": "Jean-Sim/linear-algbra-collection", "src_encoding": "UTF-8", "text": "# note we have decided that this module will not be included in the web app\r\ndef mid(lis_vec):\r\n    # input must be list of vectors\r\n    new_vec = []\r\n    add = 0\r\n\r\n    for vec_row in range(len(lis_vec[0])):\r\n        for element in lis_vec:\r\n            add += element[vec_row]\r\n            # adds each element of vector\r\n        new_vec.append(add / len(lis_vec))\r\n        # divides by the number of vectors\r\n
        add = 0\r\n\r\n    return new_vec\r\n" }, { "alpha_fraction": 0.5546218752861023, "alphanum_fraction": 0.5742297172546387, "avg_line_length": 29.909090042114258, "blob_id": "ff0a6d263683d410a7792810540df2afb854dc01", "content_id": "27c8b25661a2d208f0146146e903115f3ff91cd2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 357, "license_type": "no_license", "max_line_length": 67, "num_lines": 11, "path": "/web-app/web-app-modules/dot_product.py", "repo_name": "Jean-Sim/linear-algbra-collection", "src_encoding": "UTF-8", "text": "def dot_product(vec):\r\n    from content.modules_for_Web_app.web_app_tool import final_step\r\n    vector_1, vector_2 = final_step(vec)\r\n    dot_product = 0\r\n    for index in range(len(vector_1)):\r\n        try:\r\n            dot_product += vector_1[index] * vector_2[index]\r\n        except IndexError:\r\n            \"\"\r\n\r\n    return round(dot_product, 5)\r\n\r\n\r\n\r\n" }, { "alpha_fraction": 0.5168554782867432, "alphanum_fraction": 0.5375794172286987, "avg_line_length": 29.46956443786621, "blob_id": "016549112e580b175d16fd88561a962a1537900d", "content_id": "bd6a35d8f826403ee675633e1a1dbc2533090984", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7238, "license_type": "no_license", "max_line_length": 123, "num_lines": 230, "path": "/linear-algbra-collection.py", "repo_name": "Jean-Sim/linear-algbra-collection", "src_encoding": "UTF-8", "text": "import math\r\n\r\ndef eigenvalue_2x2(matrix):\r\n    # solves a quadratic equation\r\n    # uses the PQ formula\r\n    result1 = (matrix[0][0] + matrix[1][1]) / 2 + math.sqrt(\r\n        ((matrix[0][0] + matrix[1][1]) / 2) ** 2 - matrix[0][0] * matrix[1][1])\r\n\r\n    result2 = (matrix[0][0] + matrix[1][1]) / 2 - math.sqrt(\r\n        ((matrix[0][0] + matrix[1][1]) / 2) ** 2 - matrix[0][0] * matrix[1][1])\r\n    # result is returned as two possible solutions\r\n    return result1, result2\r\n\r\n\r\ndef dot_product(vector_1, vector_2):\r\n    dot_product = 0\r\n    # defines end variable \r\n    for index in range(len(vector_1)):\r\n        # goes through each element in the two vectors \r\n        try:\r\n            dot_product += vector_1[index] * vector_2[index]\r\n            # multiplies them then adds them to dot_product\r\n        except IndexError:\r\n            \"incorrect dimensions please check input\"\r\n            # the bare string is a no-op, so incorrect input is silently ignored\r\n    return dot_product\r\n\r\ndef VM_multiplication(vector, matrix):\r\n    for element in range(len(vector)):\r\n        # goes through the matrix and then multiplies with correct vector element\r\n        try:\r\n            for column in range(len(matrix[element])):\r\n                matrix[element][column] = matrix[element][column]*vector[element]\r\n        except IndexError:\r\n            print(\"incorrect dimensions please check your input\")\r\n    for new in range(len(vector)):\r\n        # empties vector\r\n        vector[new] = 0\r\n        # adds matrix columns together and then adds to vector\r\n        try:\r\n            for col in range(len(matrix)):\r\n                vector[new] += matrix[col][new]\r\n        except IndexError:\r\n            print(\"incorrect dimensions please check your input\")\r\n    # checks for an index error and reports it\r\n    \r\n    return vector\r\n\r\ndef vector_addition(vector_1, vector_2):\r\n    # takes in two vectors and uses a for loop to iterate over them\r\n    for element in range(len(vector_2)):\r\n        # then adds the elements together\r\n        vector_1[element] += vector_2[element]\r\n    \r\n    return vector_1\r\n\r\ndef MM_multiplication(matrix_1, matrix_2):\r\n    # requires square matrix\r\n    end_matrix = []\r\n    for element in range(len(matrix_1)):
\r\n        end_matrix.append([])\r\n        for none in matrix_1:\r\n            end_matrix[element].append(0)\r\n    # creates the return matrix \r\n\r\n    for i in range(len(matrix_1)):\r\n        for j in range(len(matrix_1)):\r\n            for k in range(len(matrix_1)):\r\n                end_matrix[i][j] += matrix_1[i][k] * matrix_2[k][j]\r\n    # then uses matrix multiplication, treating one of the matrices as a vector, before adding the products to the return matrix\r\n    \r\n    return end_matrix\r\n\r\ndef rowEchelon(M):\r\n    def normalise(M):\r\n        # normalise is used to translate between different ways of inputting a matrix\r\n        new_M = []\r\n        for element in range(len(M)):\r\n            new_M.append([])\r\n            for element2 in range(len(M)):\r\n                new_M[element].append(M[element2][element])\r\n        \r\n        return new_M\r\n\r\n    def rowMod(M, i, j, x):\r\n        M[i] = [a + x * b for a, b in zip(M[i], M[j])]\r\n        # define row mod function\r\n\r\n    M = normalise(M)\r\n    row, col = 0, 0\r\n    rows, cols = len(M), len(M[0])\r\n    while row < rows and col < cols:\r\n        if M[row][col] == 0:\r\n            for r in range(row +1, rows):\r\n                if M[r][col] != 0:\r\n                    rowMod(M, row, r, 1)\r\n                    break\r\n        # uses while loop to go through matrix \r\n\r\n        if M[row][col] == 0:\r\n            col += 1\r\n            continue\r\n        pivot = M[row][col]\r\n\r\n        for r in range(row +1, rows):\r\n            if M[r][col] != 0:\r\n                rowMod(M, r, row, -M[r][col] / pivot)\r\n        row += 1\r\n        col += 1\r\n    \r\n    return normalise(M)\r\n\r\ndef determinant(M):\r\n    def normalise(M):\r\n        new_M = []\r\n        # creates a new Matrix\r\n        for element in range(len(M)):\r\n            new_M.append([])\r\n            # appends a new sub vector\r\n            for element2 in range(len(M)):\r\n                # inserts the element in correct position \r\n                new_M[element].append(M[element2][element])\r\n        return new_M\r\n    # changes the Matrix into the right shape\r\n\r\n    num = 1\r\n    new_M = rowEchelon(M)\r\n    for element in range(len(M)):\r\n        num = num*new_M[element][element]\r\n        # multiplies elements diagonally\r\n    return num\r\n\r\ndef half_rotation(matrix):\r\n    new_ = []\r\n    for c in range(len(matrix)):\r\n        new_.append([])\r\n        for no_use in matrix:\r\n            new_[c].append(0)\r\n    \r\n    count = 0\r\n    \r\n    for col in range(len(new_)):\r\n        count += 1\r\n        new_[col][count*-1] = 1\r\n    \r\n    return MM_multiplication(matrix, new_)\r\n\r\ndef solve_eq(matrix, result):\r\n    def abs(lis):\r\n        if lis < 0:\r\n            lis = lis*-1\r\n        \r\n        return lis\r\n\r\n\r\n    for k in range(len(result)):\r\n        if abs(matrix[k][k]) == 0:\r\n            for i in range(k+1, len(result)):\r\n                if abs(matrix[i][k]) > abs(matrix[k][k]):\r\n                    for j in range(k,len(result)):\r\n                        matrix[k][j], matrix[i][j] = matrix[i][j], matrix[k][j]\r\n                        result[k], result[i] = result[i], result[k]\r\n                    break\r\n        pivot = matrix[k][k]\r\n        for j in range(k, len(result)):\r\n            matrix[k][j] /= pivot\r\n        result[k] /= pivot\r\n\r\n        for i in range(len(result)):\r\n            if i == k or matrix[i][k] == 0:\r\n                continue\r\n            factore = matrix[i][k]\r\n            for j in range(k, len(result)):\r\n                matrix[i][j] -= factore * matrix[k][j]\r\n            result[i] -= factore*result[k]\r\n    \r\n    return matrix, result\r\n\r\ndef cross_product(vector_1, vector_2):\r\n    new_vector = [1, 1, 1]\r\n\r\n    new_vector[0] *= vector_1[1]*vector_2[2] - vector_1[2]*vector_2[1]\r\n    new_vector[1] *= vector_1[2]*vector_2[0] - vector_1[0]*vector_2[2]\r\n    new_vector[2] *= vector_1[0]*vector_2[1] - vector_1[1]*vector_2[0]\r\n\r\n    return new_vector\r\n\r\ndef absolute_value(vector):\r\n    val = 0\r\n    # defines zero value \r\n\r\n    for element in vector:\r\n        val += element**2\r\n\r\n    return math.sqrt(val)\r\n\r\ndef angle(vector_1, vector_2):
\r\n    result = math.acos(dot_product(vector_2, vector_1)/(absolute_value(vector_2)*absolute_value(vector_1)))\r\n    return math.degrees(result)\r\n\r\ndef finde_position_vector(point_1, point_2):\r\n    for element in range(len(point_1)):\r\n        point_2[element] = point_2[element] - point_1[element]\r\n    \r\n    return point_2\r\n\r\ndef eigen_vectore_2x2(matrix):\r\n    # subtracts one eigenvalue from the diagonal to build (A - eigenvalue*I)\r\n    value = eigenvalue_2x2(matrix)[0]\r\n    matrix[0][0] -= value\r\n    matrix[1][1] -= value\r\n\r\n    return solve_eq(matrix, [0,0])[1]\r\n\r\ndef translation(vectore, matrix):\r\n    new_result = VM_multiplication(vectore, matrix)\r\n    return new_result\r\n\r\ndef mid_vector(lis_vec):\r\n    # input must be list of vectors\r\n    new_vec = []\r\n    add = 0\r\n\r\n    for vec_row in range(len(lis_vec[0])):\r\n        # adds all vector elements together\r\n        for element in lis_vec:\r\n            add += element[vec_row]\r\n        new_vec.append(add / len(lis_vec))\r\n        # divides by the number of vectors\r\n        add = 0\r\n\r\n    return new_vec\r\n" }, { "alpha_fraction": 0.5854922533035278, "alphanum_fraction": 0.5867875814437866, "avg_line_length": 36.20000076293945, "blob_id": "a93a34db2de78e84873a436f803f766949721d80", "content_id": "208ba32f664a8abd6614a16eed03313dcdad3c03", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 772, "license_type": "no_license", "max_line_length": 81, "num_lines": 20, "path": "/individual modules/vector_matrix_multiplication.py", "repo_name": "Jean-Sim/linear-algbra-collection", "src_encoding": "UTF-8", "text": "def VM_multiplication(vector, matrix):\r\n    for element in range(len(vector)):\r\n        # goes through the matrix and then multiplies with correct vector element\r\n        try:\r\n            for column in range(len(matrix[element])):\r\n                matrix[element][column] = matrix[element][column]*vector[element]\r\n        except IndexError:\r\n            \"incorrect dimensions please check input\"\r\n\r\n    for new in range(len(vector)):\r\n        # empties vector \r\n        vector[new] = 0\r\n        # adds matrix columns together and then adds to vector\r\n        try:\r\n            for col in range(len(matrix)):\r\n                vector[new] += matrix[col][new]\r\n        except IndexError:\r\n            \"incorrect dimensions please check input\"\r\n\r\n    return vector\r\n\r\n\r\n\r\n\r\n" }, { "alpha_fraction": 0.6028784513473511, "alphanum_fraction": 0.613539457321167, "avg_line_length": 33.109092712402344, "blob_id": "a13b8df1d4b801ea3f5663c46661d156a95ae4e9", "content_id": "6a7867d822c79c51d74821d04b7e3de49c9a4ee1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1876, "license_type": "no_license", "max_line_length": 159, "num_lines": 55, "path": "/web_app_tools.py", "repo_name": "Jean-Sim/linear-algbra-collection", "src_encoding": "UTF-8", "text": "\"\"\"\ninput rules\n1 to declare a matrix use those () ... each element inside the matrix must be separated by ; ... please input each matrix column as a vector as shown in rule 2\n2 to declare a vector use those [] ...
numbers inside must be separated by ,\n3 if the module you are using takes multiple inputs use | to separate them\nexample: [1,2]|([3,0];[0,3])\n\"\"\"\n# Note the output order is as follows: Scalar<-Vector<-Matrix\n\ndef string_vector(lis):\n    lis = lis.strip(\"[\")\n    lis = lis.strip(\"]\")\n    # removes unnecessary elements from string\n    lis = lis.split(\",\")\n    # split into individual numbers\n    for element in range(len(lis)):\n        lis[element] = float(lis[element])\n        # turns each string in the list into a number\n    return lis\n\ndef string_matrix(mis):\n    mis = str(mis).strip(\"(\")\n    mis = str(mis).strip(\")\")\n    # removes unnecessary elements from string\n    mis = mis.split(\";\")\n    # splits it into individual vector parts\n    for element in range(len(mis)):\n        mis[element] = string_vector(mis[element])\n        # turns the string into a list\n    return mis\n\ndef final_step(st):\n    st = str(st).split(\"|\")\n    new_st = []\n    # defines new_st and splits st into two elements\n    if \"([\" in st[0]:\n        new_st.append(string_matrix(st[0]))\n    elif \"[\" in st[0]:\n        new_st.append(string_vector(st[0]))\n    else:\n        new_st.append(float(st[0]))\n    # checks whether it is matrix, vector or num\n\n    if \"([\" in st[1]:\n        new_st.append(string_matrix(st[1]))\n    elif \"[\" in st[1]:\n        if type(new_st[0]) == float:\n            new_st.append(string_vector(st[1]))\n        else:\n            new_st.insert(0, string_vector(st[1]))\n    else:\n        new_st.insert(0, float(st[1]))\n    # checks if it is a matrix, vector or num\n    # and then inserts them in the correct position so that the return order is |num , vector , matrix|\n\n    return new_st[0], new_st[1]\n" }, { "alpha_fraction": 0.800000011920929, "alphanum_fraction": 0.800000011920929, "avg_line_length": 33.625, "blob_id": "fa12ded03a9361aa7df18f01267ed425961a73ef", "content_id": "92ef851eed5612f139c3ec80a25e815610cf8722", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 280, "license_type": "no_license", "max_line_length": 140, "num_lines": 8, "path": "/README.md", "repo_name": "Jean-Sim/linear-algbra-collection", "src_encoding": "UTF-8", "text": "# linear-algbra-collection\nThe linear algebra collection is supposed to provide tools for different algebraic problems that are not covered by popular Python libraries.\n\nWe have created a website from which you can access our modules.\n\n\n**Website** \n  JeanSim.pythonanywhere.com\n\n\n\n" }, { "alpha_fraction": 0.7834911942481995, "alphanum_fraction": 0.7834911942481995, "avg_line_length": 60.58333206176758, "blob_id": "25c11731c4ad6271a632447aa0460779fae793e9", "content_id": "e4d1474cb4d0a0e78ba36108288605e13e7cb6c4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 739, "license_type": "no_license", "max_line_length": 128, "num_lines": 12, "path": "/CONTRIBUTE.md", "repo_name": "Jean-Sim/linear-algbra-collection", "src_encoding": "UTF-8", "text": "# We appreciate your contributions \n\n**there are just a few things you have to know**\n- the maintainer will take care of updating the website with the code you have contributed !\n- write all your scripts in Python !\n- please only submit code that is relevant to the approved Projects !\n- if you want to add a Project please open an issue.
The maintainers will decide whether it will be added to the official list !\n\n**list of Projects**\n- the linear algbra collection [various tools for mathematical problems in linear algebra]\n- online Lorentz factor calculator [an online tool to calculate the Lorentz factor for a given speed]\n- Newton's method for solving equations [an online tool for solving equations which employs Newton's method]\n" }, { "alpha_fraction": 0.5688559412956238, "alphanum_fraction": 0.5868644118309021, "avg_line_length": 37.33333206176758, "blob_id": "005ab4aee89b4d3c6b38fbaf88ab4ed148b41ca0", "content_id": "dabaff6f614a13368bf9c36ec84b14e81e67b4c1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 945, "license_type": "no_license", "max_line_length": 116, "num_lines": 24, "path": "/individual modules/angle_two_vectors.py", "repo_name": "Jean-Sim/linear-algbra-collection", "src_encoding": "UTF-8", "text": "def angle(vector_1, vector_2):\r\n    # uses dotproduct = |vector_1|*|vector_2|*cos(a)\r\n    import math\r\n    def dot_product(vector_1, vector_2):\r\n        dot_product = 0\r\n        for index in range(len(vector_1)):\r\n            # iterates through both vectors at the same time\r\n            try:\r\n                dot_product += vector_1[index] * vector_2[index]\r\n                # multiplies both vector elements and adds them to dot_product\r\n            except IndexError:\r\n                \"ERROR\"\r\n\r\n        return dot_product\r\n\r\n    def absolute_value(vector):\r\n        val = 0\r\n        for element in vector:\r\n            val += element ** 2\r\n            # takes each value from the vector, squares it and adds it\r\n\r\n        return math.sqrt(val)\r\n    # uses formula from line 2 | result in radians converted to degrees\r\n    return math.degrees(math.acos(dot_product(vector_2,vector_1)/(absolute_value(vector_2)*absolute_value(vector_1))))\r\n" }, { "alpha_fraction": 0.5635964870452881, "alphanum_fraction": 0.5811403393745422, "avg_line_length": 34.07692337036133, "blob_id": "786f8c9ad65293126cdd1f79b3fa92774dc74fe6", "content_id": "f437353e37b208298b1129ac9c71b25d82e75403", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 912, "license_type": "no_license", "max_line_length": 96, "num_lines": 26, "path": "/web-app/final_step.py", "repo_name": "Jean-Sim/linear-algbra-collection", "src_encoding": "UTF-8", "text": "def final_step(st):\n    st = st.split(\"|\")\n    new_st = []\n    # defines new_st and splits the original input into two \n\n    if \"([\" in st[0]:\n        new_st.append(string_matrix(st[0]))\n    elif \"[\" in st[0]:\n        new_st.append(string_vector(st[0]))\n    else:\n        new_st.append(float(st[0]))\n    # checks whether the input is a matrix, vector or num\n\n    if \"([\" in st[1]:\n        new_st.append(string_matrix(st[1]))\n    elif \"[\" in st[1]:\n        if type(new_st[0]) == float:\n            new_st.append(string_vector(st[1]))\n        else:\n            new_st.insert(0, string_vector(st[1]))\n        # checks whether it is scalar, matrix, or vector then inserts it into the correct position \n    else:\n        new_st.insert(0, float(st[1]))\n    # checks whether it is matrix, vector or num\n    # and inserts them in the correct position so that order is num , vector , matrix\n    return new_st[0], new_st[1]\n" }, { "alpha_fraction": 0.5477099418640137, "alphanum_fraction": 0.5667939186096191, "avg_line_length": 32.79999923706055, "blob_id": "9fb4d2202dc9159725143107a1ccf1d12361c072", "content_id": "97be8ba29544d2be80d7f1913d079446724f10d1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 524, "license_type": "no_license",
"max_line_length": 67, "num_lines": 15, "path": "/individual modules/matrix_matrix_multiplikation.py", "repo_name": "Jean-Sim/linear-algbra-collection", "src_encoding": "UTF-8", "text": "def MM_multiplication(matrix_1, matrix_2):\r\n end_matrix = []\r\n for element in range(len(matrix_1)):\r\n end_matrix.append([])\r\n for none in matrix_1:\r\n end_matrix[element].append(0)\r\n # creates endmatrix\r\n\r\n for i in range(len(matrix_1)):\r\n for j in range(len(matrix_1)):\r\n for k in range(len(matrix_1)):\r\n end_matrix[i][j] += matrix_1[i][k] * matrix_2[k][j]\r\n # goes through each amtrix and adds there product to end matrix\r\n\r\n return end_matrix\r\n\r\n" }, { "alpha_fraction": 0.6142321825027466, "alphanum_fraction": 0.6254681944847107, "avg_line_length": 22.090909957885742, "blob_id": "7fabe1e16b25431e82a2bbfa08db30c9b77ccc3f", "content_id": "0ba61d5fbf5f943585dfac6cdbb70a1bb46810ca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 267, "license_type": "no_license", "max_line_length": 70, "num_lines": 11, "path": "/web-app/web-app-modules/absolute_value.py", "repo_name": "Jean-Sim/linear-algbra-collection", "src_encoding": "UTF-8", "text": "def absolute_value(vector):\r\n from content.modules_for_Web_app.web_app_tool import string_vector\r\n vector = string_vector(vector)\r\n import math\r\n\r\n val = 0\r\n\r\n for element in vector:\r\n val += element**2\r\n\r\n return round(math.sqrt(val), 5)\r\n\r\n" }, { "alpha_fraction": 0.5866666436195374, "alphanum_fraction": 0.6733333468437195, "avg_line_length": 24, "blob_id": "0c6fcbb87f16538b371560bc009824fd491a0c15", "content_id": "dbbf650b91d8183a055c4b05d954996ec18d1adf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 150, "license_type": "no_license", "max_line_length": 40, "num_lines": 6, "path": "/Lorentz_factor/generate_lorenz.py", "repo_name": "Jean-Sim/linear-algbra-collection", "src_encoding": "UTF-8", "text": "def lorenz(num):\n import math\n c = 299792458\n# speed of light constant\n return 1/math.sqrt(1-(num**2 /c**2))\n# computes invers lorenzfactore\n" }, { "alpha_fraction": 0.4232480525970459, "alphanum_fraction": 0.453281432390213, "avg_line_length": 36.212764739990234, "blob_id": "d89e8c1140226c66d69c72642de138096261f971", "content_id": "89df829c65dbd255b94aad36f94f6417ae9bac82", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1798, "license_type": "no_license", "max_line_length": 83, "num_lines": 47, "path": "/individual modules/eigen_vektor_2x2.py", "repo_name": "Jean-Sim/linear-algbra-collection", "src_encoding": "UTF-8", "text": "def eigen_vectore_2x2(matrix):\r\n def eigenvalue_2x2(matrix):\r\n import math\r\n\r\n result1 = (matrix[0][0] + matrix[1][1]) / 2 + math.sqrt(\r\n ((matrix[0][0] + matrix[1][1]) / 2) ** 2 - matrix[0][0] * matrix[1][1])\r\n\r\n result2 = (matrix[0][0] + matrix[1][1]) / 2 - math.sqrt(\r\n ((matrix[0][0] + matrix[1][1]) / 2) ** 2 - matrix[0][0] * matrix[1][1])\r\n\r\n return result1, result2\r\n # first makes use of eigenvalue module and solves for eigen Values \r\n\r\n matrix[0,0] -= eigenvalue_2x2(matrix)\r\n matrix[1,1] -= eigenvalue_2x2(matrix)\r\n # creates a matrix with eigen Values \r\n\r\n def solve_eq(matrix, result):\r\n def abs(lis):\r\n if lis < 0:\r\n lis = lis * -1\r\n return lis\r\n\r\n for k in range(len(result)):\r\n if abs(matrix[k][k]) < 0:\r\n for i in range(k + 1, len(result)):\r\n if 
abs(matrix[i][k]) > abs(matrix[k][k]):\r\n                        for j in range(k, len(result)):\r\n                            matrix[k][j], matrix[i][j] = matrix[i][j], matrix[k][j]\r\n                        result[k], result[i] = result[i], result[k]\r\n                        break\r\n            pivot = matrix[k][k]\r\n            for j in range(k, len(result)):\r\n                matrix[k][j] /= pivot\r\n            result[k] /= pivot\r\n\r\n            for i in range(len(result)):\r\n                if i == k or matrix[i][k] == 0:\r\n                    continue\r\n                factor = matrix[i][k]\r\n                for j in range(k, len(result)):\r\n                    matrix[i][j] -= factor * matrix[k][j]\r\n                result[i] -= factor * result[k]\r\n        return matrix, result\r\n    # Gauss-Jordan elimination: solves the system for the given result vector\r\n\r\n    return solve_eq(matrix, [0,0])[1]\r\n\r\n" }, { "alpha_fraction": 0.6577669978141785, "alphanum_fraction": 0.696601927280426, "avg_line_length": 35.272727966308594, "blob_id": "a7ddd4491a4198d816f45dbbab5d24b40b0f7924", "content_id": "f81ba40c118bb6861fbb1a9d457fe8f1de71fab5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 412, "license_type": "no_license", "max_line_length": 77, "num_lines": 11, "path": "/web-app/web-app-modules/eigen_vektor_2x2.py", "repo_name": "Jean-Sim/linear-algbra-collection", "src_encoding": "UTF-8", "text": "def eigen_vectore_2x2(matrix):\r\n    from content.modules_for_Web_app.eigenvalue_for_2x2 import eigenvalue_2x2\r\n    from content.modules_for_Web_app.web_app_tool import string_matrix\r\n    from content.modules_for_Web_app.solve_eq import solve_eq\r\n    matrix = string_matrix(matrix)\r\n\r\n    eigenvalue_1, eigenvalue_2 = eigenvalue_2x2(matrix)\r\n    matrix[0][0] -= eigenvalue_1\r\n    matrix[1][1] -= eigenvalue_1\r\n\r\n\r\n    return solve_eq(matrix, [0,0])\r\n\r\n" }, { "alpha_fraction": 0.6325878500938416, "alphanum_fraction": 0.6389776468276978, "avg_line_length": 26.272727966308594, "blob_id": "8bfef4e6a7e9b8a6512c0ad348bc75a66e748685", "content_id": "53e035d9a47c95f9df952c62b964517fffe04711", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 313, "license_type": "no_license", "max_line_length": 66, "num_lines": 11, "path": "/individual modules/absolute_value.py", "repo_name": "Jean-Sim/linear-algbra-collection", "src_encoding": "UTF-8", "text": "def absolute_value(vector):\r\n    from math import sqrt\r\n    # imports the sqrt function from Python's standard math module\r\n    val = 0\r\n    # defines the accumulator variable\r\n\r\n    for element in vector:\r\n        val += element**2\r\n        # adds the squares of all vector elements to the variable\r\n\r\n    return sqrt(val)\r\n\r\n" }, { "alpha_fraction": 0.6374502182006836, "alphanum_fraction": 0.6653386354446411, "avg_line_length": 39.83333206176758, "blob_id": "379f1c5135981ad4473df8faab454ad547479ce1", "content_id": "d0eab774128618f597785577868f3f91b0691427", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 251, "license_type": "no_license", "max_line_length": 76, "num_lines": 6, "path": "/individual modules/finde_position_vector.py", "repo_name": "Jean-Sim/linear-algbra-collection", "src_encoding": "UTF-8", "text": "def finde_position_vector(point_1, point_2):\r\n    for element in range(len(point_1)):\r\n        # subtracts point_1 from point_2 element by element\r\n        point_2[element] = point_2[element] - point_1[element]\r\n\r\n    return point_2\r\n" }, { "alpha_fraction": 0.6338028311729431, "alphanum_fraction": 0.6338028311729431, "avg_line_length": 34.5, "blob_id": "d3cc01a23419054ae0d4d00b9b416bf8c79f19ba", "content_id": "9707a2e748fea2abc2b4d0c57e6c52d8967f9185", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 355, "license_type": "no_license", "max_line_length": 54, "num_lines": 10, "path": "/web-app/str_mat.py", "repo_name": "Jean-Sim/linear-algbra-collection", "src_encoding": "UTF-8", "text": "def string_matrix(mis):\n mis = mis.strip(\"(\")\n mis = mis.strip(\")\")\n # removes the unnecessary elements from the string\n mis = mis.split(\";\")\n # splits it into individual vector parts\n for element in range(len(mis)):\n mis[element] = string_vector(mis[element])\n # turns the string matrix in to a list matrix\n return mis\n" }, { "alpha_fraction": 0.4933035671710968, "alphanum_fraction": 0.5100446343421936, "avg_line_length": 29.928571701049805, "blob_id": "cbb9f5a64392de20619b92c1a3db09c576c032f3", "content_id": "cdeed140c878f18c58830d58439b2db3e1dcc0a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 896, "license_type": "no_license", "max_line_length": 71, "num_lines": 28, "path": "/individual modules/half_rotation.py", "repo_name": "Jean-Sim/linear-algbra-collection", "src_encoding": "UTF-8", "text": "def half_rotation(matrix):\r\n # uses MM multiplication module \r\n def MM_multiplication(matrix_1, matrix_2):\r\n end_matrix = []\r\n for element in range(len(matrix_1)):\r\n end_matrix.append([])\r\n for no_use in matrix_1:\r\n end_matrix[element].append(0)\r\n\r\n for i in range(len(matrix_1)):\r\n for j in range(len(matrix_1)):\r\n for k in range(len(matrix_1)):\r\n end_matrix[i][j] += matrix_1[i][k] * matrix_2[k][j]\r\n return end_matrix\r\n\r\n new_ = []\r\n # creates a zero matrix \r\n for c in range(len(matrix)):\r\n new_.append([])\r\n for no_use in matrix:\r\n new_[c].append(0)\r\n count = 0\r\n # creates a inverted basis matrix \r\n for col in range(len(new_)):\r\n count += 1\r\n new_[col][count*-1] = 1\r\n\r\n return MM_multiplication(matrix, new_)\r\n\r\n" }, { "alpha_fraction": 0.5959367752075195, "alphanum_fraction": 0.6546275615692139, "avg_line_length": 42.29999923706055, "blob_id": "0ae1f52386be6b44d1e1faa917419c248c349bb9", "content_id": "aafff0e40108fb46ee1400a4ea77a499c5278037", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 443, "license_type": "no_license", "max_line_length": 72, "num_lines": 10, "path": "/individual modules/cross_product.py", "repo_name": "Jean-Sim/linear-algbra-collection", "src_encoding": "UTF-8", "text": "def cross_product(vector_1, vector_2):\r\n new_vector = []\r\n # defines empty list that will be contain the future end vectore \r\n \r\n new_vector.append(vector_1[1]*vector_2[2] - vector_1[2]*vector_2[1])\r\n new_vector.append(vector_1[2]*vector_2[0] - vector_1[0]*vector_2[2])\r\n new_vector.append(vector_1[0]*vector_2[1] - vector_1[1]*vector_2[0])\r\n # does the correct multiplications and subtractions \r\n\r\n return new_vector\r\n" }, { "alpha_fraction": 0.5609474182128906, "alphanum_fraction": 0.5701906681060791, "avg_line_length": 27.807018280029297, "blob_id": "c58bf7672e8634e44643a9eb957d81dc7661b68d", "content_id": "df4083194b6ca1b1e59299b4222c453b1242cdd3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1731, "license_type": "no_license", "max_line_length": 76, "num_lines": 57, "path": "/web-app/web-app-modules/web_app_tool.py", "repo_name": "Jean-Sim/linear-algbra-collection", "src_encoding": "UTF-8", "text": "\"\"\"\r\nto declear matrix use those () element inside saparated by ;\r\nto declear vector use 
those [] separated by ,\r\nto separate the numbers inside a vector use those ,\r\nto separate different inputs use |\r\n\"\"\"\r\n\r\ndef string_vector(lis):\r\n    lis = lis.strip(\"[\")\r\n    lis = lis.strip(\"]\")\r\n    # removes the enclosing brackets from the string\r\n    lis = lis.split(\",\")\r\n    # splits it into the individual numbers\r\n    for element in range(len(lis)):\r\n        lis[element] = float(lis[element])\r\n    # turns the strings into numbers\r\n    return lis\r\n\r\ndef string_matrix(mis):\r\n    mis = mis.strip(\"(\")\r\n    mis = mis.strip(\")\")\r\n    # removes the enclosing parentheses from the string\r\n    mis = mis.split(\";\")\r\n    # splits it into individual vector parts\r\n\r\n    for element in range(len(mis)):\r\n        mis[element] = string_vector(mis[element])\r\n    # turns each vector string into a list\r\n    return mis\r\n\r\n\r\ndef final_step(st):\r\n    st = st.split(\"|\")\r\n    new_st = []\r\n    # defines a new list and splits the two inputs\r\n\r\n    if \"([\" in st[0]:\r\n        new_st.append(string_matrix(st[0]))\r\n    elif \"[\" in st[0]:\r\n        new_st.append(string_vector(st[0]))\r\n    else:\r\n        new_st.append(float(st[0]))\r\n    # checks whether the first input is a matrix, vector or number\r\n\r\n    if \"([\" in st[1]:\r\n        new_st.append(string_matrix(st[1]))\r\n    elif \"[\" in st[1]:\r\n        if type(new_st[0]) == float:\r\n            new_st.append(string_vector(st[1]))\r\n        else:\r\n            new_st.insert(0, string_vector(st[1]))\r\n    else:\r\n        new_st.insert(0, float(st[1]))\r\n    # checks whether the second input is a matrix, vector or number\r\n    # and inserts it in the correct position so that the return order is number , vector , matrix\r\n\r\n    return new_st[0], new_st[1]\r\n" }, { "alpha_fraction": 0.5878962278366089, "alphanum_fraction": 0.6109510064125061, "avg_line_length": 32.5, "blob_id": "9dc29e39fb74e037e7f2e670ac1f14416edbe91f", "content_id": "8228191992ad3107c7a486331e358567f8f3b2f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 347, "license_type": "no_license", "max_line_length": 67, "num_lines": 10, "path": "/web-app/web-app-modules/vector_addition.py", "repo_name": "Jean-Sim/linear-algbra-collection", "src_encoding": "UTF-8", "text": "def vector_addition(vec):\r\n    from content.modules_for_Web_app.web_app_tool import final_step\r\n    vector_1, vector_2 = final_step(vec)\r\n    for element in range(len(vector_2)):\r\n        try:\r\n            vector_1[element] += vector_2[element]\r\n        except IndexError:\r\n            vector_1.append(vector_2[element])\r\n\r\n    return vector_1\r\n\r\n" } ]
35
erikalexandertack/RugbyProject
https://github.com/erikalexandertack/RugbyProject
4dd8c183f1dcfe00cd3883e59f80d2904de4cfaa
ce71879dea67d9578ce65d7050a39dbe8b7b5f4f
4e6a78fbc6fe06980b605e870adbc46cb3f95027
refs/heads/main
2022-12-30T09:34:42.539252
2020-10-21T20:03:27
2020-10-21T20:03:27
302,742,067
2
0
null
null
null
null
null
[ { "alpha_fraction": 0.6098226308822632, "alphanum_fraction": 0.6166439056396484, "avg_line_length": 24.433332443237305, "blob_id": "101bf477a15bbd90194ee5780ccae9ee44d45206", "content_id": "b420f924c70f709f6cc3e998b85364a5f3997707", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 733, "license_type": "no_license", "max_line_length": 57, "num_lines": 30, "path": "/XMLtoJSON.py", "repo_name": "erikalexandertack/RugbyProject", "src_encoding": "UTF-8", "text": "from sportscode import parse_xml_file\nimport json\n\n\ndef main():\n    teams = ['Rugby United New York','Houston Sabercats']\n    filename = 'HOU_RUNY.xml'\n\n    # Parse XML file\n    events = parse_xml_file(filename,teams)\n\n\n    # Group the events by player name\n    player_events = {}\n    for event in events:\n        player_name = event['player']\n        if player_name not in player_events:\n            player_events[player_name] = []\n        player_events[player_name].append(event)\n    \n\n    # for player_name,events in player_events.items():\n    #     print(player_name,len(events))\n\n    # To see the data\n    print(json.dumps(events[0:200],indent=4))\n\nmain()\n" }, { "alpha_fraction": 0.540156364440918, "alphanum_fraction": 0.5408670902252197, "avg_line_length": 28.01030921936035, "blob_id": "43ace825eed8fc13f003669c9c13f049857929c4", "content_id": "fa0cb83c9e813df4dc8c564b7c442e7613f95028", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2814, "license_type": "no_license", "max_line_length": 57, "num_lines": 97, "path": "/main.py", "repo_name": "erikalexandertack/RugbyProject", "src_encoding": "UTF-8", "text": "import xml.etree.ElementTree as ET\n\ndef parse_xml_file(fname,teams):\n    \n    root = _get_xml_root(fname)\n    player_map = _generate_player_map(root,teams)\n\n    instances = {}\n    for instance in _instance_iterator(root):\n        instance_dict = _parse_instance(instance)\n\n        labels = []\n        for label in _label_iterator(instance):\n            group = label.find('group').text\n            text = label.find('text').text\n            if group == 'Game Clock':\n                instance_dict['game_clock'] = text\n            elif group in teams:\n                instance_dict['team'] = group\n                instance_dict['player'] = text\n            elif group == 'X':\n                instance_dict['x'] = int(text)\n            elif group == 'Y':\n                instance_dict['y'] = int(text)\n            elif group == 'Field Area':\n                instance_dict['field_area'] = text\n            elif group == 'Field L-R':\n                instance_dict['field_lr'] = text\n            else:\n                labels.append((group,text))\n\n        if instance_dict['team'] is None:\n            for team_name, players in player_map.items():\n                if instance_dict['code'] in players:\n                    instance_dict['team'] = team_name\n\n        instance_dict['labels'] = labels\n\n        instances[instance_dict['iid']] = instance_dict\n    \n    events = [v for k,v in instances.items()]\n    events = sorted(events,key=lambda e: e['start'])\n\n    return events\n\n\n\ndef _generate_player_map(root,teams):\n    player_map = {teams[0]:[],teams[1]:[]}\n    for instance in _instance_iterator(root):\n        for label in _label_iterator(instance):\n            group = label.find('group').text\n            text = label.find('text').text\n            if group in teams:\n                team = group\n                player = text\n                player_map[team].append(player)\n    player_map = {k:set(v) for k,v in player_map.items()}\n\n    return player_map\n\ndef _get_xml_root(fname):\n    tree = ET.parse(fname)\n    root = tree.getroot()\n\n    return root\n\ndef _instance_iterator(root):\n    for all_instances in root.iter('ALL_INSTANCES'):\n        for instance in all_instances.iter('instance'):\n            yield instance\n\ndef 
_label_iterator(instance):\n    for label in instance.iter('label'):\n        yield label\n\ndef _parse_instance(instance):\n    iid = instance.find('ID').text\n    start = instance.find('start').text\n    end = instance.find('end').text\n    code = instance.find('code').text\n\n    instance_dict = {\n        'start': float(start),\n        'end': float(end),\n        'code': code,\n        'iid': iid,\n        'game_clock': None,\n        'team': None,\n        'player': None,\n        'x': None,\n        'y': None,\n        'field_lr': None,\n        'field_area': None\n    }\n\n    return instance_dict\n" }, { "alpha_fraction": 0.4150843918323517, "alphanum_fraction": 0.5108122229576111, "avg_line_length": 31.35897445678711, "blob_id": "46ae12b3b6ccb312f0aa8090a32184d6c6cf04a3", "content_id": "c8c7f3d920c655822d1a7a22d41f682e714cabdd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3792, "license_type": "no_license", "max_line_length": 96, "num_lines": 117, "path": "/BallPosition.py", "repo_name": "erikalexandertack/RugbyProject", "src_encoding": "UTF-8", "text": "from sportscode import parse_xml_file\nfrom test import draw_pitch\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport json\nimport xml.etree.ElementTree as ET\nimport seaborn as sns\n\n# You will have to specify the game, the player and the time period\n\n\n#plt.show()\n\ndef main():\n    teams = ['Rugby United New York','Houston SaberCats']\n    filename = 'HOU_RUNY.xml'\n    count=0\n    maxcount = 500\n    \n    # Parse XML file\n    events = parse_xml_file(filename,teams)\n    \n    plt.ion() # Interactive mode on\n    label_blacklist = get_label_blacklist(teams)\n    print(label_blacklist)\n\n    for e in events:\n        labels = e['labels']\n        x = e['x']\n        y = e['y']\n        \n        if x and y and labels:\n            #if e['field_lr']=='R':\n            if e['code']=='Connor Wallace-Sims':\n                x = int(x)\n                y = int(y)\n                #Pitch Outline & Trylines\n                plt.plot([0,0],[0,700], color=\"black\")#Ltry\n                plt.plot([-150,-150],[0,700], color=\"black\")#Ldeadball\n                plt.plot([-150,1150],[700,700], color=\"black\")#sidelineA\n                plt.plot([1000,1000],[700,0], color=\"black\")#Rtry\n                plt.plot([1150,-150],[0,0], color=\"black\")#SidelineB\n                plt.plot([1150,1150],[0,700], color=\"black\")#Rdeadball\n\n                #50, 22's, 5m \n                plt.plot([220,220],[0,700], color=\"black\",)#L22\n                plt.plot([770,770],[0,700], color=\"black\",)#R22\n                plt.plot([500,500],[0,700], color=\"black\")#50m\n\n                #Dashes, and text for 5 and 15m's\n                plt.plot([525,475],[150,150], color=\"black\",linestyle=(0, (10, 25)))#15mB\n                plt.plot([525,475],[550,550], color=\"black\",)#15mA\n                plt.plot([195,245],[550,550], color=\"black\",)#15mA\n                plt.plot([745,795],[550,550], color=\"black\",)#15mA\n                plt.plot([195,245],[150,150], color=\"black\",)#15mB\n                plt.plot([745,795],[150,150], color=\"black\",)#15mB\n                plt.plot([950,50],[650,650], color=\"black\",linestyle=(0, (15, 20)))#5mA\n                plt.plot([950,50],[50,50], color=\"black\",linestyle=(0, (15, 20)))#5mB\n                plt.plot([50,50],[0,700], color=\"black\",linestyle=(0, (15, 25)), linewidth=1)\n                plt.plot([950,950],[0,700], color=\"black\", linestyle=(0, (15, 25)), linewidth=1)\n                ax.text(175, 155, r'2 2', fontsize=15) #L22 text\n                ax.text(725, 155, r'2 2', fontsize=15) #R22 text\n                ax.text(450, 155, r'5 0', fontsize=15) #50 text\n                plt.plot([600,600],[0,700], color=\"black\",linestyle=(0, (15, 25)), linewidth=1)#\n                plt.plot([400,400],[0,700], color=\"black\", linestyle=(0, (15, 25)), linewidth=1)\n                \n\n                #Goalposts\n                plt.plot([0,0],[310,380], color=\"red\",linewidth=5)#Lpost\n                plt.plot([1000,1000],[310,380], color=\"red\",linewidth=5)#Rpost\n                plt.plot(x,y,'o')\n                plt.show()\n\n                
\n\n            \n            count += 1\n            if count == maxcount:\n                break\n\n\n\ndef get_event_label(labels,blacklist):\n    label = next((l[0] for l in labels if l[0] not in blacklist),None)\n\n    return label\n\ndef get_label_blacklist(teams):\n    blacklist = {\n        'Game Clock',\n        'Phase Number',\n        'Field Area',\n        'Field L-R',\n        'X',\n        'Y',\n        'Post-Tackle Assessment'\n    }\n    blacklist.add(teams[0])\n    blacklist.add(teams[1])\n\n    return blacklist\n\ndef _get_xml_root(fname):\n    tree = ET.parse(fname)\n    root = tree.getroot()\n\n    return root\n\n\nif __name__ == '__main__':\n    fig=plt.figure()\n\n    fig.set_size_inches(7, 5)\n    ax=fig.add_subplot(1,1,1)\n    \n    main()\n\n    \n" }, { "alpha_fraction": 0.5663394331932068, "alphanum_fraction": 0.591304361820221, "avg_line_length": 23.427631378173828, "blob_id": "daf7eeea81040f795500b6a0d2489a7e37c77556", "content_id": "e6aa193351b784f21c22448e2726f88d2281e695", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3565, "license_type": "no_license", "max_line_length": 118, "num_lines": 152, "path": "/WorkCalculation.py", "repo_name": "erikalexandertack/RugbyProject", "src_encoding": "UTF-8", "text": "import csv\nimport matplotlib.pyplot as plt\n\n\ndef work_calculation(f):\n    data = []\n    with open(f,'r') as fin:\n        rd = csv.reader(fin)\n        for row in rd:\n            data.append(row)\n\n    # separate the header; the strings would complicate the numeric calculations\n    header = data[0]\n    data.pop(0)\n\n    #create variables\n    teamRUNY = []\n    opponent = []\n    runy_key = [] # name/position keys for RUNY and the opponents\n    opp_key = []\n    runy_olist = [] \n    runy_dlist = []\n    opp_olist = []\n    opp_dlist = []\n\n    #split team\n    for i in data:\n        if (i[0] == 'Rugby United New York'):\n            teamRUNY.append(i) \n        else:\n            opponent.append(i)\n\n    \n    #offensive work function\n    def offense(pos):\n        x = pos\n        ballCarries = int(x[6]) * .5\n        ballCarryMetres = int(x[7]) * .04\n        linebreaks = int(x[8]) * .3\n        tackleBreaks = int(x[9]) * .2\n        offloads = int(x[10]) * .02\n        gainlineMade_percent = int(x[11]) \n        attRMArrivals = int(x[16]) *.2\n        lineoutTakes = int(x[24]) * .1\n        \n        work = (ballCarries+ ballCarryMetres + lineoutTakes + attRMArrivals + offloads + tackleBreaks + linebreaks)/100\n        return work\n\n    #defensive work function\n    def defense(pos):\n        x = pos\n        madeTackles = int(x[12]) * .5 \n        missedTackles =int(x[13]) * .08 \n        madeTackle_percent =int(x[14])\n        breakdownSteals =int(x[15]) * .1\n        defRMArrivals =int(x[18]) * .3\n        lineoutSteals =int(x[25]) * .1\n        \n        work = (madeTackles-missedTackles+breakdownSteals+defRMArrivals+lineoutSteals)/10\n        return work\n\n\n    def key_maker(team,team_key): \n        for i in team:\n            name = i[2]\n            pos = i[1]\n            team_key.append([name, pos])\n\n    #create keys\n    key_maker(teamRUNY,runy_key)\n    key_maker(opponent, opp_key)\n\n\n    # loop through the work functions to calculate both teams' values\n    def work_calc(team,list1,list2):\n        print('Entering work_calc function with:')\n        print('Team=',team)\n        print('list1=',list1)\n        print('list2=',list2)\n        index = 0\n        for i in team: \n            list1.append(offense(i))\n            list2.append(defense(i))\n            index +=1\n        print('exiting work_calc with')\n        print('Team=',team)\n        print('list1=',list1)\n        print('list2=',list2)\n    \n    work_calc(teamRUNY,runy_olist, runy_dlist)\n    work_calc(opponent, opp_olist, opp_dlist)\n\n    runy_names = []\n    opponent_names = []\n\n    def nameList(team,n_list):\n        for i in team:\n            n_list.append(i[2])\n    \n    nameList(teamRUNY, runy_names)\n    nameList(opponent, opponent_names)\n    \n    #calculate total individual work\n    indi_runyP = 
[]\n    indi_oppP = []\n    \n    def indi_calc(team, list1, list2, tList):\n        # sums each player's offensive and defensive work\n        for i in range(len(team)):\n            tList.append(list1[i] + list2[i])\n\n    indi_calc(teamRUNY,runy_olist,runy_dlist,indi_runyP)\n    indi_calc(opponent,opp_olist,opp_dlist,indi_oppP)\n\n\n    #calculate team work\n    teamsum = sum(runy_olist) + sum(runy_dlist)\n    oppsum = sum(opp_olist) + sum(opp_dlist)\n\n    print (\"team total sum for \"+str(teamRUNY[0][0])+\" is \" + str(teamsum))\n    print (\"team total sum for \"+str(opponent[0][0])+\" is \" + str(oppsum)+\"\\n\")\n\n    #plot the values\n    #x1 = runy_names\n    #x2 = opponent_names\n    #y1 = indi_runyP\n    #y2 = indi_oppP\n    \n\n    #plt.bar(x1,y1)\n    #plt.bar(x2,y2)\n\n    #plt.title('Runy Work Rate')\n    #plt.xlabel('Players')\n    #plt.ylabel('Work rate')\n\n\n    #plt.show()\n\ngame =[\n    'wk1_runy_ne.csv', \n    'wk2_runy_aus.csv', \n    'wk3_runy_atl.csv', \n    'wk4_runy_hou.csv',\n    'wk5_runy_sd.csv']\n\n#work_calculation(game[0])\nfor i in game:\n    work_calculation(i)\n    \n\n" }, { "alpha_fraction": 0.568903386592865, "alphanum_fraction": 0.5853121876716614, "avg_line_length": 45.4179573059082, "blob_id": "d282cf743767067f0de48700774006a4cc8253f0", "content_id": "ecd8a944468b04e7e9afdecb6e014e8b69c3e215", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14992, "license_type": "no_license", "max_line_length": 113, "num_lines": 323, "path": "/PlotMomentum.py", "repo_name": "erikalexandertack/RugbyProject", "src_encoding": "UTF-8", "text": "import xml.etree.ElementTree as ET\nimport csv\nimport matplotlib.pyplot as plt\nfrom sportscode import parse_xml_file\nimport json\nfrom matplotlib import style\n\n\n#style.use('bmh')\n\ndef main():\n\n    # Edit these\n    \n    #teams = ['Rugby ATL','Rugby United New York']\n    #teams = ['Austin Gilgronis','Rugby United New York']\n    #teams = ['San Diego Legion','Rugby United New York']\n    teams = ['Houston Sabercats','Rugby United New York']\n    #teams = ['New England Freejacks','Rugby United New York']\n\n    #filename = 'ATL_RUNY.xml'\n    #filename = 'RUNY_AUS.xml'\n    #filename = 'HOU_RUNY.xml'\n    filename = 'HOU_RUNY.xml'\n    #filename = 'NE_RUNY.xml'\n    \n    # Initialize variables\n    momentum_teams_R = {t:0 for t in teams}\n    momentum_teams_L = {t:0 for t in teams}\n    momentum_teams = {t:0 for t in teams}\n    score_lookup = get_scoring_chart()\n    total_momentum = []\n    total_momentum_L = []\n    total_momentum_R = []\n\n    # Parse XML file\n    events = parse_xml_file(filename,teams)\n    events = [e for e in events if e['team'] is not None]\n    side_events = parse_xml_file(filename,teams)\n    side_events = [e for e in side_events if e['team'] is not None]\n\n    for event in events:\n\n\n        # Find the event in the 
instance with the greatest point impact\n max_event = max(event['labels'],key=lambda l: abs(score_lookup.get(l,0)))\n max_score = score_lookup.get(max_event,0)\n momentum_teams_L[event['team']] += max_score\n\n # Look for special events for plotting\n points_scored = any(l[0] == 'Points Scored' for l in event['labels'])\n yellow_card = any(l[1] == 'Yellow Card' for l in event['labels'])\n\n # Calculate Running Score\n current_momentum = momentum_teams_L[teams[1]] - momentum_teams_L[teams[0]]\n \n # Update event object to add to the total momentum list\n event['current_momentum'] = current_momentum\n event['points_scored'] = points_scored\n event['yellow_card'] = yellow_card\n total_momentum_L.append(event)\n\n elif field_position =='R':\n\n # Find the event in the instance with the greatest point impact\n max_event = max(event['labels'],key=lambda l: abs(score_lookup.get(l,0)))\n max_score = score_lookup.get(max_event,0)\n momentum_teams_R[event['team']] += max_score\n\n # Look for special events for plotting\n points_scored = any(l[0] == 'Points Scored' for l in event['labels'])\n yellow_card = any(l[1] == 'Yellow Card' for l in event['labels'])\n\n # Calculate Running Score\n current_momentum = momentum_teams_R[teams[1]] - momentum_teams_R[teams[0]]\n \n # Update event object to add to the total momentum list\n event['current_momentum'] = current_momentum\n event['points_scored'] = points_scored\n event['yellow_card'] = yellow_card\n total_momentum_R.append(event)\n \n \n \n ax1 = plt.subplot2grid((4,1), (0,0), rowspan=1, colspan=1)\n ax2 = plt.subplot2grid((4,1), (1,0), rowspan=1, colspan=1)\n ax3 = plt.subplot2grid((4,1), (2,0), rowspan=2, colspan=1)\n\n x,y,team_1_time, team_1_points , team_2_time, team_2_points = plot_momentum(total_momentum_R,teams)\n ax1.set_title('Right-Hand side Momentum (Full Match)')\n ax1.plot(x,y,'r')\n\n x,y,team_1_time, team_1_points , team_2_time, team_2_points = plot_momentum(total_momentum_L,teams)\n ax2.set_title('Left-Hand side Momentum (Full Match)')\n ax2.plot(x,y,'r')\n\n x,y,team_1_time, team_1_points , team_2_time, team_2_points = plot_momentum(total_momentum,teams)\n\n # plt.xlabel('Time')\n # plt.ylabel('Momentum')\n # ax3.set_title('RUNY HOU Full Momentum')\n # ax3.plot(team_1_time,team_1_points,'s',color='orange', label='RUNY Score')\n # ax3.plot(team_2_time,team_2_points,'s',color='red',label='HOU Score')\n # ax3.legend()\n # ax3.plot(x,y,'blue')\n\n #plt.subplots_adjust(wspace=6)\n # plt.tight_layout(rect=[0, 0, 1, 0.95])\n # #plt.show()\n # plt.savefig('RugbyHOU.png')\n print(total_momentum[1])\n\ndef plot_momentum(tot_momentum,teams):\n time = [e['start'] for e in tot_momentum]\n momentum = [e['current_momentum'] for e in tot_momentum]\n\n team_1_time = [e['start'] for e in tot_momentum if e['team'] == teams[0] and e['points_scored']]\n team_1_points = [e['current_momentum'] for e in tot_momentum if e['team'] == teams[0] and e['points_scored']]\n #plt.plot(team_1_time,team_1_points,'s',color='orange')\n\n team_2_time = [e['start'] for e in tot_momentum if e['team'] == teams[1] and e['points_scored']]\n team_2_points = [e['current_momentum'] for e in tot_momentum if e['team'] == teams[1] and e['points_scored']]\n #plt.plot(team_2_time,team_2_points,'s',color='red')\n \n return time, momentum, team_1_time, team_1_points , team_2_time, team_2_points \n\n\ndef print_to_csv(events):\n with open('output.csv','w') as fout:\n wr = csv.writer(fout)\n wr.writerow(['start','end','game_clock','code','group','text'])\n for event in events:\n 
wr.writerow(event)\n\ndef get_scoring_chart():\n base_carry = 1\n base_kick = 5\n turnover = 15\n yellow_card = 30\n penalty = 20\n conversion = 20\n kick_at_goal = 25\n try_scored = 50\n lineout = 3\n maul = 5\n catch = 3\n base_pass = 2\n ruck = 1\n scrum = 3\n tackle = 2\n\n return {\n ('Ball Carry', '30-40m - Metres Gained'): base_carry*20,\n ('Ball Carry', '2-5m - Metres Gained'): base_carry*2,\n ('Ball Carry', 'Ball Carry Quality - Ineffective'): -base_carry,\n ('Ball Carry', '5-10m - Metres Gained'): base_carry*3,\n ('Ball Carry', 'Line Break'): base_carry*10,\n ('Ball Carry', 'Ball Carry Quality - Effective'): base_carry,\n ('Ball Carry', '0-2m - Metres Gained'): base_carry,\n ('Ball Carry', '20m-30m - Metres Gained'): base_carry*10,\n ('Ball Carry', '10-20m - Metres Gained'): base_carry*5,\n ('Ball Carry', 'Ball Carry Quality - Turnover Ineffective'): -turnover,\n ('Ball In Play', 'Ball in Play - Lineout'): 0,\n ('Ball In Play', 'Ball in Play - Scrum'): 0,\n ('Ball In Play', 'Ball in Play'): 0,\n ('Ball Steal', 'Ball Steal - Ruck'): turnover,\n ('Ball Steal', 'Ball Steal - Tackle'): turnover,\n ('Ball Steal', 'Ball Steal - Set Piece'): turnover,\n ('Cards', 'Yellow Card'): -yellow_card,\n ('Catch', 'Catch Quality - Won Possession'): 10,\n ('Catch', 'Catch Quality - Turnover Ineffective'): -turnover,\n ('Catch', 'Catch Quality - Ineffective'): -catch,\n ('Catch', 'Catch Quality - Effective'): catch,\n ('Conversion', 'Conversion - Made'): conversion,\n ('Conversion', 'Conversion - Total'): conversion,\n ('Counter Attack', 'Counter Attack - From Kick'): 0,\n ('Free Kick', 'Fair Catch'): 0,\n ('Free Kick', 'Free Kick'): 0,\n ('Free Kick Conceded', 'Free Kick Conceded'): -turnover,\n ('Free Kick Infringement', 'Free Kick Infringement Reason - Not Taking Hit'): 0,\n ('Free Kick Infringement', 'Free Kick Infringement - Others'): 0,\n ('Free Kick Infringement', 'Free Kick Infringement Reason - Closing Gap'): 0,\n ('Free Kick Infringement', 'Free Kick Infringement Reason - Others'): 0,\n ('Free Kick Infringement', 'Free Kick Infringement Reason - Early Push'): 0,\n ('Free Kick Infringement', 'Free Kick Infringement - Lineout'): 0,\n ('Free Kick Infringement', 'Free Kick Infringement - Scrum'): 0,\n ('Gainline', 'Gainline Neutral'): base_carry,\n ('Gainline', 'Gainline Lost'): -base_carry,\n ('Gainline', 'Gainline +'): base_carry*2,\n ('Gainline', 'Gainline -'): -base_carry*2,\n ('Gainline', 'Gainline Over'): base_carry,\n ('Handling Error', 'Handling Error'): -5,\n ('Kick', '10-20m - Metres Gained'): 0,\n ('Kick', 'Territory Kick'): 0,\n ('Kick', 'Short Kick Result - Effective'): base_kick,\n ('Kick', '20m-30m - Metres Gained'): base_kick*2,\n ('Kick', '0-2m - Metres Gained'): -base_kick,\n ('Kick', 'Long Kick Result - Turnover Effective'): base_kick*3,\n ('Kick', 'Short Kick'): 0,\n ('Kick', '40m+ - Metres Gained'): base_kick*4,\n ('Kick', '2-5m - Metres Gained'): -base_kick,\n ('Kick', '30-40m - Metres Gained'): base_kick*3,\n ('Kick', 'Long Kick Result - Turnover Ineffective - Out On Full'): -base_kick,\n ('Kick', 'Long Kick Result - Turnover Ineffective'): -base_kick,\n ('Kick', 'Short Kick Result - Turnover Ineffective'): -base_kick,\n ('Kick', 'Short Kick Result - Ineffective'): 0,\n ('Kick', 'Touch Kick'): 0,\n ('Kicks at Goal Result', 'Kicks at Goal - Made'): kick_at_goal,\n ('Kicks at Goal Result', 'Kicks at Goal - Missed'): 0,\n ('Lineout', 'Lineout - Effective'): lineout,\n ('Lineout', 'Lineout - 5 Man'): 0,\n ('Lineout', 'Lineout - Ineffective'): 0,\n ('Lineout', 'Lineout - 5 + 
1'): 0,\n ('Lineout', 'Lineout - 6 + 1'): 0,\n ('Lineout', 'Lineout - Turnover Ineffective'): -lineout*3,\n ('Lineout', 'Lineout - Full'): 0,\n ('Lineout', 'Lineout - 4 Man'): 0,\n ('Lineout', 'Lineout - 6 Man'): 0,\n ('Maul', 'Maul - Turnover'): -maul*2,\n ('Maul', 'Maul - Retained'): maul,\n ('Pass', 'Other Pass - Effective'): base_pass,\n ('Pass', 'Other Pass - Turnover Ineffective'): -turnover,\n ('Pass', 'Offload - Turnover Ineffective'): -turnover,\n ('Pass', 'Offload - Effective'): base_pass,\n ('Pass', 'Offload - Ineffective'): 0,\n ('Pass', 'Other Pass - Ineffective'): 0,\n ('Penalty Infringement', 'Penalty Infringement Reason - Other'): -penalty,\n ('Penalty Infringement', 'Penalty Infringement Reason - High Tackle'): -penalty,\n ('Penalty Infringement', 'Penalty Infringement - Tackle'): -penalty,\n ('Penalty Infringement', 'Penalty Infringement Reason - A Defender'): -penalty,\n ('Penalty Infringement', 'Penalty Infringement - Maul'): -penalty,\n ('Penalty Infringement', 'Penalty Conceded'): -penalty,\n ('Penalty Infringement', 'Penalty Infringement Reason - Taking Out - Obstruction'): -penalty,\n ('Penalty Infringement', 'Penalty Infringement - Ruck'): -penalty,\n ('Penalty Infringement', 'Penalty Infringement - Foul Play'): -penalty,\n ('Penalty Infringement', 'Penalty Infringement Reason - Entry'): -penalty,\n ('Penalty Infringement', 'Penalty Infringement Reason - Not Rolling Away'): -penalty,\n ('Penalty Infringement', 'Penalty Infringement Reason - Charging'): -penalty,\n ('Penalty Infringement', 'Penalty Infringement Reason - Not Releasing'): -penalty,\n ('Penalty Infringement', 'Penalty Infringement Reason - Collapsing'): -penalty,\n ('Penalty Infringement', 'Penalty Infringement - Lineout'): -penalty,\n ('Penalty Infringement', 'Penalty Infringement - Scrum'): -penalty,\n ('Penalty Infringement', 'Penalty Infringement - Offside'): -penalty,\n ('Penalty Infringement', 'Penalty Infringement Reason - Boring In'): -penalty,\n ('Penalty Infringement', 'Penalty Infringement Reason - Stapling Off - Defence'): -penalty,\n ('Penalty Infringement', 'Penalty Infringement Reason - Backline'): -penalty,\n ('Penalty Infringement', 'Penalty Infringement Reason - Standing Up'): -penalty,\n ('Penalty Shot', 'Penalty Shot - Missed'): 0,\n ('Penalty Shot', 'Penalty Shot - Made'): kick_at_goal,\n ('Penalty Shot', 'Penalty Shot - Total'): 0,\n ('Penalty Start', 'Tap Penalty'): 0,\n ('Penalty Start', 'Penalty Kick - Effective'): base_kick*3,\n ('Points Scored', 'Conversion'): conversion,\n ('Points Scored', 'Try'): try_scored,\n ('Points Scored', 'Penalty Shot'): kick_at_goal,\n ('Points Scored', 'Penalty Try'): try_scored,\n ('Ruck', 'Ruck - Retained'): ruck,\n ('Ruck', 'Ruck - Slow'): -ruck*2,\n ('Ruck', 'Ruck - Fast'): ruck*2,\n ('Ruck', 'Ruck - Not Completed'): 0,\n ('Scrum', 'Scrum - Pre-Ball stability'): 0,\n ('Scrum', 'Scrum - Completed'): 0,\n ('Scrum', 'Scrum - Reset'): 0,\n ('Scrum', 'Scrum - Popped'): 0,\n ('Scrum', 'Scrum - Effective'): scrum,\n ('Scrum', 'Scrum - Turnover Ineffective'): 0,\n ('Scrum', 'Scrum - Turnover'): 0,\n ('Scrum', 'Scrum - Collapsed'): 0,\n ('Scrum', 'Scrum - Free Kick'): 0,\n ('Scrum', 'Scrum - Pre-Engage'): 0,\n ('Scrum', 'Scrum - Penalty'): 0,\n ('Start 22', 'Start 22 - Turnover'): 0,\n ('Start 22', 'Start 22 - Short'): 0,\n ('Start Half', 'Start Half - Long'): 0,\n ('Start Half', 'Start Half - Regained'): turnover,\n ('Start Half', 'Start Half - Turnover'): -turnover,\n ('Start Half', 'Start Half - Short'): 0,\n ('Stoppages', 'Stoppages - Held Up 
Ingoal'): 0,\n        ('Stoppages', 'Stoppages - Ball Lost Forward'): -turnover,\n        ('Stoppages', 'Stoppages - Unplayable from Kick'): 0,\n        ('Stoppages', 'Stoppages - Forward Pass'): -5,\n        ('Stoppages', 'Stoppages - Unplayable'): 0,\n        ('Stoppages', 'Stoppages - Knock On'): -turnover,\n        ('Tackle', 'Quality - Ball Control Lost'): tackle*2,\n        ('Tackle', 'Active Tackle'): tackle,\n        ('Tackle', 'Quality - Made Tackle'): tackle,\n        ('Tackle', 'Quality - Impeded Progress'): tackle*2,\n        ('Tackle', 'Try Saver Tackle'): tackle*3,\n        ('Tackle', 'Quality - Turnover Tackle'): turnover,\n        ('Tackle', 'Quality - Missed Tackle'): -tackle*2,\n        ('Tackle', 'Quality - Dominant Tackle'): tackle*4,\n        ('Tackle', 'Assist Tackle'): 0,\n        ('Turnover Conceded', 'Turnover Conceded'): -turnover\n    }\n\n\nif __name__ == '__main__':\n    main()" }, { "alpha_fraction": 0.47898033261299133, "alphanum_fraction": 0.6395348906517029, "avg_line_length": 37.534481048583984, "blob_id": "65e19f72945938ead142196d0f664565ed86295e", "content_id": "098965b9fc686bfbac1e34b9d6e0dcfaaa376152", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2236, "license_type": "no_license", "max_line_length": 84, "num_lines": 58, "path": "/drawpitch.py", "repo_name": "erikalexandertack/RugbyProject", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\nimport json\nfrom pandas.io.json import json_normalize\nimport numpy as np\nimport seaborn as sns\nimport pandas as pd\nfrom matplotlib.patches import Arc, Rectangle, ConnectionPatch\nfrom matplotlib.offsetbox import OffsetImage\nimport squarify\nfrom functools import reduce\n\ndef draw_pitch(ax):\n    # the pitch is drawn as 1000 x 700 plot units\n\n    #Pitch Outline & Trylines\n    plt.plot([0,0],[0,700], color=\"black\")#Ltry\n    plt.plot([-150,-150],[0,700], color=\"black\")#Ldeadball\n    plt.plot([-150,1150],[700,700], color=\"black\")#sidelineA\n    plt.plot([1000,1000],[700,0], color=\"black\")#Rtry\n    plt.plot([1150,-150],[0,0], color=\"black\")#SidelineB\n    plt.plot([1150,1150],[0,700], color=\"black\")#Rdeadball\n\n    #50, 22's, 5m \n    plt.plot([220,220],[0,700], color=\"black\",)#L22\n    plt.plot([770,770],[0,700], color=\"black\",)#R22\n    plt.plot([500,500],[0,700], color=\"black\")#50m\n\n    #Dashes, and text for 5 and 15m's\n    plt.plot([525,475],[150,150], color=\"black\",linestyle=(0, (10, 25)))#15mB\n    plt.plot([525,475],[550,550], color=\"black\",)#15mA\n    plt.plot([195,245],[550,550], color=\"black\",)#15mA\n    plt.plot([745,795],[550,550], color=\"black\",)#15mA\n    plt.plot([195,245],[150,150], color=\"black\",)#15mB\n    plt.plot([745,795],[150,150], color=\"black\",)#15mB\n    plt.plot([950,50],[650,650], color=\"black\",linestyle=(0, (15, 20)))#5mA\n    plt.plot([950,50],[50,50], color=\"black\",linestyle=(0, (15, 20)))#5mB\n    plt.plot([50,50],[0,700], color=\"black\",linestyle=(0, (15, 25)), linewidth=1)\n    plt.plot([950,950],[0,700], color=\"black\", linestyle=(0, (15, 25)), linewidth=1)\n    ax.text(175, 155, r'2 2', fontsize=15) #L22 text\n    ax.text(725, 155, r'2 2', fontsize=15) #R22 text\n    ax.text(450, 155, r'5 0', fontsize=15) #50 text\n    plt.plot([600,600],[0,700], color=\"black\",linestyle=(0, (15, 25)), linewidth=1)#\n    plt.plot([400,400],[0,700], color=\"black\", linestyle=(0, (15, 25)), linewidth=1)\n    \n\n    #Goalposts\n    plt.plot([0,0],[310,380], color=\"red\",linewidth=5)#Lpost\n    plt.plot([1000,1000],[310,380], color=\"red\",linewidth=5)#Rpost\n\n\n\nfig=plt.figure()\nfig.set_size_inches(7, 
5)\nax=fig.add_subplot(1,1,1)\ndraw_pitch(ax)\nplt.show()\n\n" } ]
6
und3f1n3d5/ServerBot
https://github.com/und3f1n3d5/ServerBot
88152da689ca8b94f7dc254f31f04585d53bf8ed
5be3d1496dbd5567653d4d9e2e6aa7dadf46ceab
048710227cabdbfc6357c89f520fc877996b6940
refs/heads/master
2023-02-26T23:53:17.462448
2021-02-02T17:20:23
2021-02-02T17:20:23
315,085,810
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4834437072277069, "alphanum_fraction": 0.7152317762374878, "avg_line_length": 15.777777671813965, "blob_id": "a3590cebdbf17ff8b1750d1bcfce13652845a8f8", "content_id": "c4c53b56f6acfd4707de3917552b8ef243f12159", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 151, "license_type": "no_license", "max_line_length": 23, "num_lines": 9, "path": "/requirements.txt", "repo_name": "und3f1n3d5/ServerBot", "src_encoding": "UTF-8", "text": "certifi==2020.11.8\nchardet==3.0.4\nDateTime==4.3\nidna==2.10\npyTelegramBotAPI==3.7.4\npytz==2020.4\nrequests==2.25.0\nurllib3==1.26.2\nzope.interface==5.2.0\n" }, { "alpha_fraction": 0.5467830300331116, "alphanum_fraction": 0.5534808039665222, "avg_line_length": 39.55144119262695, "blob_id": "99dd8d12269ae714e39bfd453caba76f892ebeb1", "content_id": "aa06fd182a29dc8c2aef7ca34fc7c07233219bc8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10834, "license_type": "no_license", "max_line_length": 121, "num_lines": 243, "path": "/main.py", "repo_name": "und3f1n3d5/ServerBot", "src_encoding": "UTF-8", "text": "import telebot\n#import time\nimport datetime\n\ntoken = \"\"\n\n\nclass BotHandler:\n\n    def __init__(self, token):\n        self.users_write = None\n        self.users_read = open(\"users.txt\", \"r\")\n        self.users = dict()\n        flag = 0\n        i = \"\"\n        for user in self.users_read:\n            if user == \"#\\n\":\n                flag = 1\n                continue\n            if flag == 2:\n                self.users[i].add_event(user[:-1])\n            if flag == 1:\n                i = user[:-1]\n                self.users[i] = User(user[:-1])\n                flag += 1\n        self.users_read.close()\n        self.token = token\n        self.api_url = \"https://api.telegram.org/bot1411772657:AAFkziVjMcehzkWDRWJyrnt7au7EBqDL9nQ/\"\n\n    def send_message(self, chat_id, text):\n        bot.send_message(chat_id, text)\n\n    def update_users(self, i):\n        if i not in self.users:\n            self.users[i] = User(i)\n\n    def remind(self):\n        for user in self.users:\n            message = self.users[user].get_next_event()\n            if message:\n                self.send_message(user, message)\n\n    def add_events(self, user_id, update):\n        try:\n            self.users[str(user_id)].add_event(update)\n            self.send_message(user_id, \"Added\")\n        except Exception as e:\n            errors = open(\"error.txt\", \"a\")\n            errors.write(str(e) + \" in add_events, message: \" + update)\n            errors.close()\n            self.send_message(user_id, \"Invalid format\")\n\n    def remove_events(self, user_id, remove):\n        try:\n            self.users[str(user_id)].remove_event(remove)\n            self.send_message(user_id, \"Removed\")\n        except Exception as e:\n            errors = open(\"error.txt\", \"a\")\n            errors.write(str(e) + \" in remove_events, message: \" + remove)\n            errors.close()\n            self.send_message(user_id, \"Invalid format or no such event\")\n\n    def start_update(self, user_id):\n        self.send_message(user_id, \"Enter the event you want to add in the format of the example:\\n\\t Thu 12:20 \"\n                                   \"<OKTCh test>\\n (the <> are required!)\\nWhen you have added all the events you need (\"\n                                   \"each in a new message) enter the /stop_adding command to save the changes\")\n        self.users[user_id].is_updating = True\n\n    def start_removing(self, user_id):\n        self.send_message(user_id, \"Enter the event you want to remove in the format of the example:\\n\\t Thu 12:20 \"\n                                   \"<OKTCh test>\\n (the <> are required!)\\nWhen you have removed all the unneeded events (\"\n                                   \"each in a new message) enter the /stop_removing command to save the changes\")\n        self.users[user_id].is_removing = True\n\n    def end_update(self, user_id):\n        
self.users[user_id].is_updating = False\n        self.send_message(user_id, \"Adding finished successfully\")\n\n    def end_removing(self, user_id):\n        self.users[user_id].is_removing = False\n        self.send_message(user_id, \"Removing finished successfully\")\n\n    def reset(self, user_id):\n        self.users[user_id].events.clear()\n        self.send_message(user_id, \"Successfully cleared!\")\n\n    def help(self, chat_id):\n        self.send_message(chat_id,\n                          \"\\t /start - start the conversation\\n\\t/help - show the list of available commands\\n\\t/add_events \"\n                          \"- add events to the timetable\\n\\t/remove_events - remove events from \"\n                          \"the timetable\\n\\t/stop_adding - finish adding\\n\\t/stop_removing - finish \"\n                          \"removing\\n\\t/reset - clear the timetable\\n\\t/show_timetable - show the current timetable\")\n\n    def sync(self):\n        self.users_write = open(\"users.txt\", \"w\")\n        for user in self.users:\n            self.users[user].write(self.users_write)\n        self.users_write.close()\n\n    def show_timetable(self, user_id):\n        ans = \"\"\n        for event in self.users[user_id].events:\n            ans += event.day + \" \" + str(event.hour) + \":\" + str(event.minute) + \" \" + event.message + \"\\n\"\n        if not ans:\n            ans = \"It seems you do not have any events!\"\n        self.send_message(user_id, ans)\n\n    def start_message(self, chat_id):\n        self.update_users(chat_id)\n        self.send_message(chat_id, 'Greetings, friend! I am a bot that can remind you about events. Press '\n                                   '/help to see my commands')\n\n    def refresh(self):\n        for user in self.users:\n            self.users[user].refresh()\n\n\nclass User:\n    def __init__(self, string):\n        self.days = [\"Mon\", \"Tue\", \"Wed\", \"Thu\", \"Fri\", \"Sat\", \"Sun\"]\n        self.id = string\n        self.events = set()\n        self.is_updating = False\n        self.is_removing = False\n\n    def add_event(self, string):\n        event = Event(string)\n        self.events.add(event)\n\n    def remove_event(self, string):\n        event = Event(string)\n        to_rm = event\n        for e in self.events:\n            if e.day == event.day and e.hour == event.hour and e.minute == event.minute and e.message == event.message:\n                to_rm = e\n        self.events.remove(to_rm)\n\n    def get_next_event(self):\n        now = datetime.datetime.now()\n        day = self.days[now.weekday()]\n        hour = now.hour\n        minute = now.minute\n        for event in self.events:\n            if event.day == day and event.hour == hour and event.minute == minute and event.reminded == 0:\n                event.reminded = 1\n                return event.message\n        return None\n\n    def write(self, file):\n        file.write(\"#\\n\")\n        file.write(self.id + \"\\n\")\n        for event in self.events:\n            file.write(event.day + \" \" + str(event.hour) + \":\" + str(event.minute) + \" <\" + event.message + \">\\n\")\n\n    def refresh(self):\n        for event in self.events:\n            event.reminded = False\n\n\nclass Event:\n    def __init__(self, string):\n        self.day = string[:string.find(\" \")]\n        self.hour = int(string[string.find(\" \") + 1: string.find(\":\")])\n        self.minute = int(string[string.find(\":\") + 1: string.find(\"<\")])\n        self.message = string[string.find(\"<\") + 1: string.find(\">\")]\n        self.reminded = 0\n\n\ntime_bot = BotHandler(token)\nbot = telebot.TeleBot('1411772657:AAFkziVjMcehzkWDRWJyrnt7au7EBqDL9nQ')\n\n\ndef main():\n    new_offset = None\n\n    while True:\n        try:\n            upds = bot.get_updates(offset=new_offset)\n            if upds:\n                last_update = upds[-1]\n                last_update_id = last_update.update_id\n                last_chat_text = last_update.message.text\n                last_chat_id = str(last_update.message.chat.id)\n                time_bot.update_users(last_chat_id)\n\n                in_process = time_bot.users[last_chat_id].is_removing or time_bot.users[last_chat_id].is_updating\n\n                if 
last_chat_text.find(\"/start\") != -1 and not in_process:\n                    time_bot.start_message(last_chat_id)\n                elif in_process and last_chat_text.find(\"/start\") != -1:\n                    time_bot.send_message(last_chat_id, \"First finish the adding/removing process\")\n\n                if last_chat_text.find(\"/help\") != -1 and not in_process:\n                    time_bot.help(last_chat_id)\n                elif in_process and last_chat_text.find(\"/help\") != -1:\n                    time_bot.send_message(last_chat_id, \"First finish the adding/removing process\")\n\n                if last_chat_text.find(\"/reset\") != -1 and not in_process:\n                    time_bot.reset(last_chat_id)\n                elif in_process and last_chat_text.find(\"/reset\") != -1:\n                    time_bot.send_message(last_chat_id, \"First finish the adding/removing process\")\n\n                if last_chat_text.find(\"/show_timetable\") != -1 and not in_process:\n                    time_bot.show_timetable(last_chat_id)\n                elif in_process and last_chat_text.find(\"/show_timetable\") != -1:\n                    time_bot.send_message(last_chat_id, \"First finish the adding/removing process\")\n\n                if last_chat_text.find(\"/add_events\") != -1 and not in_process:\n                    time_bot.start_update(last_chat_id)\n                elif last_chat_text.find(\"/add_events\") != -1 and time_bot.users[last_chat_id].is_removing:\n                    time_bot.send_message(last_chat_id, \"First finish the removing process\")\n                elif last_chat_text.find(\"/stop_adding\") != -1:\n                    time_bot.end_update(last_chat_id)\n                elif time_bot.users[last_chat_id].is_updating:\n                    time_bot.add_events(last_chat_id, last_chat_text)\n\n                if last_chat_text.find(\"/remove_events\") != -1 and not in_process:\n                    time_bot.start_removing(last_chat_id)\n                elif last_chat_text.find(\"/remove_events\") != -1 and time_bot.users[last_chat_id].is_updating:\n                    time_bot.send_message(last_chat_id, \"First finish the adding process\")\n                elif last_chat_text.find(\"/stop_removing\") != -1:\n                    time_bot.end_removing(last_chat_id)\n                elif time_bot.users[last_chat_id].is_removing:\n                    time_bot.remove_events(last_chat_id, last_chat_text)\n\n                new_offset = last_update_id + 1\n\n            time_bot.remind()\n            now = datetime.datetime.now()\n            if now.minute == 0:\n                time_bot.sync()\n            # reset the reminder flags at the start of each week (Monday, midnight)\n            if now.weekday() == 0 and now.hour == 0:\n                time_bot.refresh()\n        except Exception as e:\n            errors = open(\"error.txt\", \"a\")\n            errors.write(str(e) + \" in main\")\n            errors.close()\n\n\nif __name__ == '__main__':\n    try:\n        main()\n    except KeyboardInterrupt:\n        exit()\n" } ]
2
taylorcavazos/BIMM185
https://github.com/taylorcavazos/BIMM185
2d4dc86bae22fc3177cc878952ae55b31edc4b1e
eea9853d71d8f7212375453722887d5fb59a69df
e86886badc9a0b882404ddec253a1e216891a187
refs/heads/master
2021-06-18T08:26:47.155585
2017-06-11T06:39:24
2017-06-11T06:39:24
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6287371516227722, "alphanum_fraction": 0.6376617550849915, "avg_line_length": 35.73770523071289, "blob_id": "accba05087a75e549ef4593a9b13984c57bbe14d", "content_id": "fc2093fe1d6a8261435dd1ab3981fd518973c7ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2241, "license_type": "no_license", "max_line_length": 104, "num_lines": 61, "path": "/codon_frequencies.py", "repo_name": "taylorcavazos/BIMM185", "src_encoding": "UTF-8", "text": "### Function that estimates the frequencies of codons \n### INPUT: file containing bnumber and gene sequence separated by a tab\n### OUTPUT: table of gene name, codon frequency and length of gene\n\nimport sys\nimport textwrap\nfrom collections import OrderedDict\n\n### read in bnumber sequence file\ngene_file = sys.argv[1]\ngenes_seq = open(gene_file).read().splitlines()\n\n### open output file\noutput = open('codon_count_table.txt', 'w')\n\n### list of 64 codons\ncodons = ['ATG', 'ATT', 'ATC', 'ATA', 'CTT', 'CTC', 'CTA', 'CTG', 'TTA', 'TTG',\n'GTT', 'GTC', 'GTA', 'GTG', 'TTT', 'TTC', 'TGT', 'TGC', 'GCT', 'GCC', 'GCA', 'GCG',\n'GGT', 'GGC', 'GGA', 'GGG', 'CCT', 'CCC', 'CCA', 'CCG', 'ACT', 'ACC', 'ACA', 'ACG', 'TCT', 'TCC', 'TCA',\n 'TCG', 'AGT', 'AGC', 'TAT', 'TAC', 'TGG', 'CAA', 'CAG', 'AAT', 'AAC', 'CAT', 'CAC', 'GAA', 'GAG',\n'GAT', 'GAC', 'AAA', 'AAG', 'CGT', 'CGC', 'CGA', 'CGG', 'AGA', 'AGG', 'TAA', 'TAG', 'TGA']\n\n### dictionary to keep global counts\ncodon_global_count = OrderedDict()\n\n### write header of table\noutput.write('Gene'+ '\\t')\nfor c in codons:\n\toutput.write(c + '\\t')\n\tcodon_global_count[c] = 0\noutput.write('Length'+ '\\n')\n\n### nested dictionary to keep track of individual gene codon counts\ngene_codon_count = OrderedDict()\ntotal_gene_len = 0\nfor genes in genes_seq:\n\tgene = genes.split('\\t')\n\tlength_gene = len(gene[1])\n\t### check to ensure genes are divisible by three \n\tif length_gene % 3 == 0:\n\t\t### only count genes that are tallied so the Totals row matches the per-gene rows\n\t\ttotal_gene_len = total_gene_len + length_gene\n\t\tgene_codon_count[gene[0]] = OrderedDict()\n\t\tfor c in codons: \n\t\t\tgene_codon_count[gene[0]][c]=0\n\t\t### split the gene into its codons\n\t\tseq_split = textwrap.wrap(gene[1], 3)\n\t\tfor cod in seq_split:\n\t\t\tcodon_global_count[cod] = codon_global_count[cod] + 1\n\t\t\tgene_codon_count[gene[0]][cod] = gene_codon_count[gene[0]][cod]+1\n\t\t### write the gene and the counts of each codon to the file\n\t\toutput.write(str(gene[0] + '\\t'))\n\t\tfor k in gene_codon_count[gene[0]].keys():\n\t\t\toutput.write(str(gene_codon_count[gene[0]][k])+ '\\t')\n\t\toutput.write(str(length_gene)+ '\\n')\n\n### write the total counts for each codon to file \noutput.write('Totals' + '\\t')\nfor k in codon_global_count.keys():\n\toutput.write(str(codon_global_count.get(k)) + '\\t')\noutput.write(str(total_gene_len))\noutput.write('\\n')\n" }, { "alpha_fraction": 0.6292312741279602, "alphanum_fraction": 0.6629230976104736, "avg_line_length": 35.31843566894531, "blob_id": "ce0a4a00a5f9dd8d38f4a7b863744b46084f12c0", "content_id": "e7a4c22d620b26816384baa746752d6fd3a56258", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6322, "license_type": "no_license", "max_line_length": 163, "num_lines": 179, "path": "/project_pipeline.py", "repo_name": "taylorcavazos/BIMM185", "src_encoding": "UTF-8", "text": "# Pipeline that uses a GEO dataset and the output of GSEA to analyze the results\n\n# Import necessary libraries for analysis\nimport re\nimport math\nimport 
numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import stats\nimport pandas as pd\nimport decimal\nimport MySQLdb\n\n# Read in GEO dataset GSE43837\nseries = open(\"GSE43837_series_matrix.txt\").read().splitlines()\n\n# Step 1: Prepare input data for GSEA\n# Find position in file where series matrix starts, ignore all data above this position\nrows = []\npos = 0\nfor line in series:\n if re.search(\"!Sample_source_name_ch1\", line):\n phenotypes = line.split(\"!Sample_source_name_ch1\\t\")[1].split(\"\\t\")\n if re.search(\"!series_matrix_table_begin\", line):\n break\n pos = pos+1\nfor i in range(pos+1, len(series)-1):\n rows.append(series[i])\n# extract the number of samples from dataset\nnum_samples = len(rows[0].split(\"\\t\")[1:])\n# output phenotype.cls file (input for GSEA)\nphen = open(\"phenotype_GSE43837.cls\", \"w\")\nphen.write(str(num_samples) + \"\\t2\\t1\\n\")\nphen.write(\"# MET PRIMARY\\n\")\nphen.write(\"\\t\".join(list(np.repeat([\"0\", \"1\"], [19, 19])))) \nphen.close()\n# output gene_expression.txt file (input for GSEA)\nexpr = open(\"gene_expression_GSE43837.txt\", \"w\")\nheader = rows[0].split(\"\\t\")\nexpr.write(\"NAME\\tDESCRIPTION\\t\")\nfor i in range(1, len(header)-1):\n expr.write(str(eval(header[i])) + \"\\t\")\nexpr.write(str(eval(header[len(header)-1])) + \"\\n\")\nfor i in range(1, len(rows)):\n r = rows[i].split(\"\\t\")\n expr.write(str(eval(r[0]))+\"\\tna\\t\" + \"\\t\".join(r[1:]) + \"\\n\")\nexpr.close()\n\n# Assuming GSEA was performed, gene set was chosen as signature, and core enriched genes in pathway were converted to their probes and included as an input file...\n# Step 2: Analyze gene set in comparison to known signatures for HER2+ brain metastases samples\n# Read in gene list and gene expression file\nGSE43837 = pd.read_csv(\"gene_expression_GSE43837.txt\", sep=\"\\t\", index_col=0)\nDEK_probes = open(\"DEK_genes_probes_hgu133.txt\").read().splitlines()\n# connect to mySQL database\ndb = MySQLdb.connect()\n# create cursor for database querying\nc = db.cursor()\n# remove unecessary description column\ndel GSE43837[\"DESCRIPTION\"]\n# make dictionary for probes and gene symbol\nDEK = {}\nfor i in range(1, len(DEK_probes)):\n probe_gene = DEK_probes[i].split(\" \")\n DEK[probe_gene[0][1:-1]] = probe_gene[1][1:-1]\n# subset dataset to only include core enriched probes in DEK pathway\ndf = pd.DataFrame(columns=GSE43837.columns)\nfor k in DEK.keys():\n if k in list(GSE43837.index):\n df.loc[GSE43837.ix[k,:].name] = GSE43837.ix[k,:]\n\n# plot the DEK oncogene expression signature for primary and metastasis samples\ny = []\nx = [1,2]\ny_met = ()\ny_pri = ()\nfor col in list(df.columns):\n # access mysql table consisting of patient, tumor type, HER2 status, ER status, and Age\n sql = \"SELECT Tumor_Type FROM breast_patient_info WHERE Sample_ID='\" + col + \"'\"\n c.execute(sql)\n tumor_type = c.fetchone()[0]\n if tumor_type == \"brain metastasis\":\n y_met = y_met + (df[[col]].mean().values[0],)\n if tumor_type == \"primary breast tumor\":\n y_pri = y_pri + (df[[col]].mean().values[0],)\ny.append(y_met)\ny.append(y_pri)\nfor xe, ye in zip(x, y):\n plt.scatter([xe]*len(ye), ye)\nplt.xticks([1,2])\nplt.boxplot([y_met, y_pri])\nplt.axes().set_xticklabels(['Metastases', 'Primary'])\nwilcox = round(decimal.Decimal(stats.wilcoxon(y_met, y_pri)[1]), 4)\nplt.title(\"Wilcoxon p=\" + str(wilcox),size=14)\nplt.ylabel(\"DEK Oncogene Encriched Signature\")\nplt.show() \n\n# Plot the BRCA1 (probe = g2218153_3p_a_at) expression for samples\nBRCA_exp = 
GSE43837.ix[\"g2218153_3p_a_at\",:]\ny = []\nx = [1,2]\ny_met = ()\ny_pri = ()\nfor col in list(df.columns):\n sql = \"SELECT Tumor_Type FROM breast_patient_info WHERE Sample_ID='\" + col + \"'\"\n c.execute(sql)\n tumor_type = c.fetchone()[0]\n if tumor_type == \"brain metastasis\":\n y_met = y_met + (df[[col]].mean().values[0],)\n if tumor_type == \"primary breast tumor\":\n y_pri = y_pri + (df[[col]].mean().values[0],)\ny.append(y_met)\ny.append(y_pri)\nfor xe, ye in zip(x, y):\n plt.scatter([xe]*len(ye), ye)\nplt.xticks([1,2])\nplt.boxplot([y_met, y_pri])\nplt.axes().set_xticklabels(['Metastases', 'Primary'])\nwilcox = round(decimal.Decimal(stats.wilcoxon(y_met, y_pri)[1]), 4)\nplt.title(\"Wilcoxon p=\" + str(wilcox),size=14)\nplt.ylabel(\"BRCA1 Expression (g2218153_3p_a_at)\")\nplt.show()\n\n# Plot the BRCA1 (probe = g6552300_3p_a_at) expression for samples\nBRCA_exp = GSE43837.ix[\"g6552300_3p_a_at\",:]\ny = []\nx = [1,2]\ny_met = ()\ny_pri = ()\nfor col in list(df.columns):\n sql = \"SELECT Tumor_Type FROM breast_patient_info WHERE Sample_ID='\" + col + \"'\"\n c.execute(sql)\n tumor_type = c.fetchone()[0]\n if tumor_type == \"brain metastasis\":\n y_met = y_met + (df[[col]].mean().values[0],)\n if tumor_type == \"primary breast tumor\":\n y_pri = y_pri + (df[[col]].mean().values[0],)\ny.append(y_met)\ny.append(y_pri)\nfor xe, ye in zip(x, y):\n plt.scatter([xe]*len(ye), ye)\nplt.xticks([1,2])\nplt.boxplot([y_met, y_pri])\nplt.axes().set_xticklabels(['Metastases', 'Primary'])\nwilcox = round(decimal.Decimal(stats.wilcoxon(y_met, y_pri)[1]), 4)\nplt.title(\"Wilcoxon p=\" + str(wilcox),size=14)\nplt.ylabel(\"BRCA1 Expression (g6552300_3p_a_at)\")\nplt.show()\n\n# Explore the effect of ER status on DEK expression\nER_pos_met = []\nER_pos_pri = []\nER_neg_met = []\nER_neg_pri = []\nfor col in df.columns:\n sql = \"SELECT Sample_ID, Tumor_Type, ER_status FROM breast_patient_info WHERE Sample_ID='\" + col + \"'\"\n c.execute(sql)\n status = c.fetchone()\n if status[1] == \"brain metastasis\": \n if status[2] == \"+\": \n ER_pos_met.append(df[[status[0]]].mean().values[0])\n elif status[2] == \"-\":\n ER_neg_met.append(df[[status[0]]].mean().values[0])\n elif status[1] == \"primary breast tumor\":\n if status[2] == \"+\":\n ER_pos_pri.append(df[[status[0]]].mean().values[0])\n elif status[2] == \"-\":\n ER_neg_pri.append(df[[status[0]]].mean().values[0])\ny = []\nx = [1,2,3,4]\ny.append(ER_neg_met)\ny.append(ER_pos_pri)\ny.append(ER_neg_pri)\nfor xe, ye in zip(x, y):\n plt.scatter([xe]*len(ye), ye)\nplt.xticks([1,2,3,4])\nplt.boxplot([ER_pos_met, ER_neg_met, ER_pos_pri, ER_neg_pri])\nplt.axes().set_xticklabels(['Met ER+','Met ER-', 'Primary ER+', 'Primary ER-'])\nplt.title(\"ER Status Versus DEK Signature Expression\",size=14)\nplt.ylabel(\"DEK Expression\")\nplt.show()\n" }, { "alpha_fraction": 0.6724960207939148, "alphanum_fraction": 0.678855299949646, "avg_line_length": 31.842105865478516, "blob_id": "edd4de0c0bcda1a279eeb4b26cae1255f2950180", "content_id": "510b0a3b461e18743c327036cf698b2333d5a4ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 629, "license_type": "no_license", "max_line_length": 91, "num_lines": 19, "path": "/z_score.py", "repo_name": "taylorcavazos/BIMM185", "src_encoding": "UTF-8", "text": "###This function outputs the highest z-score for each report.tbl file in the sub directorys\n### Input: file with directories pointing to report files for sub directories in dirtree\n\nimport sys\nimport csv\n\nfiles = 
sys.argv[1]\n\noutput_file = open(\"output.txt\", 'w')\nfiles = open(files).read().splitlines()\nfor f in files:\n\twith open(f) as f_open:\n \t\treader = csv.reader(f_open, delimiter=\"\\t\")\n \t\td = list(reader)\t\t\n\t\t\n\t\toutput_file.write(f.split('/')[0]) ### write the directory name to the file\n\t\toutput_file.write('\\t')\n\t\toutput_file.write(d[2][3]) ### write the highest z score to the file\n\t\toutput_file.write('\\n')\n\n\n\n\n\n" }, { "alpha_fraction": 0.6071428656578064, "alphanum_fraction": 0.6234745383262634, "avg_line_length": 56.44329833984375, "blob_id": "05ae5a251dd43a1c3b3cacfc9fa1ae4fd285c71d", "content_id": "44cd7baf63ccaaf49759bfada35a4019795349d8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5572, "license_type": "no_license", "max_line_length": 246, "num_lines": 97, "path": "/parse_genbank_4_tables.py", "repo_name": "taylorcavazos/BIMM185", "src_encoding": "UTF-8", "text": "### This script outputs all necessary files to fill genes and gene features tables.\n### INPUTS: Genbank File and Organism for file naming\n### OUTPUTS: A number of files used to fill SQL tables\n\n\nimport sys\nfrom Bio import SeqIO # importing biopython\nimport gzip # importing library to read zipped files\n\n# list with genomes you want to add to the database\ngb_file = [\"/home/linux/ieng6/bm185s/tcavazos/genomes/E_coli_K12_MG1655/GCF_000005845.2_ASM584v2_genomic.gbff.gz\",\"/home/linux/ieng6/bm185s/tcavazos/genomes/A_tumafaciens/GCF_000576515.1_ASM57651v1/GCF_000576515.1_ASM57651v1_genomic.gbff.gz\"]\n\n# out files to write to different tables\nout_genome = open(\"genomes.txt\", \"w\")\nout_replicon = open(\"replicons.txt\", \"w\")\nout_gene = open(\"genes.txt\", \"w\")\nout_exon = open(\"exons.txt\", \"w\")\nout_ext = open(\"extref.txt\", \"w\")\nout_syn = open(\"synonyms.txt\", \"w\")\nout_fun = open(\"functions.txt\", \"w\")\n# counters to maintain the current gene id, replicon id, and genome id for sql tables\n# these fields should be updated to show the last id used for each field\ngenome_id, replicon_id, gene_id = 0,0,0\n\n# begin looping through genomes\nfor i in range(0, len(gb_file)):\n genome_id = genome_id + 1 #increment genome id for every genome\n gnm_size, gnm_num_genes, num_reps = 0,0,0 # set counters for genome size, number of genes, and number of replicons to zero\n # open gzipped genome genbank file and parse through replicons in the genome\n with gzip.open(gb_file[i]) as f:\n for record in SeqIO.parse( f, \"gb\"):\n # getting genome and replicon information\n rep_shape = record.annotations.get(\"topology\"); replicon_name = record.description; accession = record.name; date = record.annotations.get(\"date\"); assembly = record.dbxrefs[2].split(\":\")[1]; rep_size = len(record.seq)\n # incrementing replicon counts and genome size\n num_reps = num_reps+1; replicon_id = replicon_id+1; num_genes_rep=0; gnm_size = gnm_size + rep_size\n # getting replicon type\n if \"plasmid\" in record.description.lower(): rep_type = \"plasmid\"\n else: rep_type = \"chromosome\"\n # extracting genome taxonomy\n domain = record.annotations[\"taxonomy\"][0]\n if domain == \"Bacteria\": domain = \"bacteria\"\n elif domain == \"Archaeon\": domain = \"archea\"\n elif domain == \"Eukaryota\": domain = \"eukarya\"\n # looping through genes of replicon\n for feat in record.features:\n #getting genome tax_id and name from source\n\tif feat.type==\"source\": \n tax_id = feat.qualifiers.get(\"db_xref\")[0].split(\":\")[1]\n genome_name 
= feat.qualifiers.get(\"organism\")\n # only examine features if CDS, exclude pseudo genes \n\tif feat.type==\"CDS\" and \"pseudo\" not in feat.qualifiers: \n\t # increment gene counters\n gene_id = gene_id+1; gnm_num_genes = gnm_num_genes+1; num_genes_rep = num_genes_rep+1 \n\t # Extract protein id (external reference)\n if feat.qualifiers.get(\"protein_id\") != None: \n\t protein_id = feat.qualifiers[\"protein_id\"][0].split(\".\")[0]\n\t out_ext.write(str(gene_id) + \"\\t\" + \"refseq\" + \"\\t\" + protein_id + \"\\n\")\n # Extract external references\n if feat.qualifiers.get(\"db_xref\")!=None:\n\t for ref in feat.qualifiers.get(\"db_xref\"):\n x_db = ref.split(\":\")[0]\n x_id = ref.split(\":\")[1]\n\t out_ext.write(str(gene_id)+ \"\\t\" + x_db + \"\\t\" + x_id + \"\\n\")\n\t# Extract gene synonyms\n if feat.qualifiers.get(\"gene_synonym\") !=None:\n\t for syn in feat.qualifiers.get(\"gene_synonym\")[0].split(\"; \"):\n\t out_syn.write(str(gene_id) + \"\\t\" + syn + \"\\n\")\n # Extract locus tag\n locus_tag = feat.qualifiers[\"locus_tag\"][0]\n\t# Extract gene function\n if feat.qualifiers.get(\"function\") != None:\n\t func = feat.qualifiers[\"function\"][0]\n\t out_fun.write(str(gene_id) + \"\\t\" + func + \"\\n\")\n # Extract gene name\n\t if feat.qualifiers.get(\"gene\") != None: name = feat.qualifiers[\"gene\"][0]\n\t elif feat.qualifiers.get(\"old_locus_tag\") != None: name = feat.qualifiers[\"old_locus_tag\"][0]\n else: name = \" \"\n # Extract direction of strand\n if feat.location.strand == 1: strand = \"F\"\n else: strand = \"R\"\n # Extract length of gene and number of exons\n num_exons = len(feat.location.parts)\n len_bp = len(feat)\n # Extract exon information such as start, stop, and length\n for l in feat.location.parts:\n left = l.start \n right = l.end\n out_exon.write(str(gene_id) + \"\\t\" + name + \"\\t\" + str(left) + \"\\t\" + str(right) + \"\\t\" + str(len_bp) + \"\\n\")\n # Extract product name of gene\n if feat.qualifiers.get(\"product\") == None: product_name = \" \"\n else: product_name = feat.qualifiers[\"product\"][0]\n # Write to genes table file \n out_gene.write(str(gene_id) + \"\\t\" + str(genome_id) + \"\\t\" + str(replicon_id) + \"\\t\" + str(locus_tag) +\"\\t\"+protein_id+ \"\\t\" + str(name) + \"\\t\" + str(strand) + \"\\t\" + str(num_exons) + \"\\t\" + str(len_bp) + \"\\t\"+ str(product_name) + \"\\n\")\n # Write to replicons table file\n out_replicon.write(str(replicon_id) + \"\\t\" + str(genome_id) + \"\\t\" + str(replicon_name) + \"\\t\" + str(rep_type) + \"\\t\" + str(rep_shape) + \"\\t\" + str(num_genes_rep) + \"\\t\" + str(rep_size) + \"\\t\" + str(accession) + \"\\t\" + str(date) + \"\\n\" )\n # Write to genomes table file \n out_genome.write(str(genome_id) + \"\\t\" + str(genome_name[0]) + \"\\t\" + str(tax_id) + \"\\t\" + domain + \"\\t\" + str(num_reps) + \"\\t\" + str(gnm_num_genes) + \"\\t\" + str(gnm_size) + \"\\t\" + str(assembly) + \"\\n\")\n" }, { "alpha_fraction": 0.6304118037223816, "alphanum_fraction": 0.6483632326126099, "avg_line_length": 29.419355392456055, "blob_id": "75b649ff46ac4ef684c9d37387b06f02335480b0", "content_id": "ecf433a23c71bf90c7f00b80b9d05ff3744f1827", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 947, "license_type": "no_license", "max_line_length": 132, "num_lines": 31, "path": "/creat_prot_data.py", "repo_name": "taylorcavazos/BIMM185", "src_encoding": "UTF-8", "text": "### This function takes an input of protein sequences and creates a protein 
data base that can be used for other downstream analysis\n### The protein sequence number and identification number along with the sequence are outputted\n \nimport sys\nimport re\n\nprot_file = sys.argv[1]\nprot_data = open(prot_file).read().splitlines()\n\noutput_file = open('/home/linux/ieng6/bm185s/tcavazos/my_files/prot_database.txt', 'w')\n\nprot_fixed = []\ni = 0\n\nwhile(i < len(prot_data)):\n\tif prot_data[i].startswith('>') == True:\n\t\thead = prot_data[i]\n\t\ti = i+1\n\telse:\n\t\tseq = ''\n\t\twhile(i < len(prot_data) and prot_data[i].startswith('>') != True):\n\t\t\tseq = seq + prot_data[i]\n\t\t\ti = i+1\n\t\tprot_fixed.append((head, seq))\n\nfor i in range(0, len(prot_fixed)):\n\tmatch = re.search('>gnl\\|[A-Z-]+\\|([0-9]+)\\|([1-9.A-Z]+)', prot_fixed[i][0])\n\t\n\tif match:\n\t\toutput_file.write(str(match.group(2)+ \"-\"+ match.group(1)+ '\\n'))\n\t\toutput_file.write(str(prot_fixed[i][1]+\"\\n\"))\n\t\n\t\n" }, { "alpha_fraction": 0.6117236018180847, "alphanum_fraction": 0.6273191571235657, "avg_line_length": 46.67948532104492, "blob_id": "bebea7dbc78e6d9ef4ae886fc64cfc956f022598", "content_id": "0161466b817da3d93039c1d512cdfc509c0ee0a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3719, "license_type": "no_license", "max_line_length": 82, "num_lines": 78, "path": "/calc_op_dist.py", "repo_name": "taylorcavazos/BIMM185", "src_encoding": "UTF-8", "text": "# Function that outputs the distances between operons\nimport sys\nimport MySQLdb\nimport re\n# Connect to MySQL database\ndb = MySQLdb.connect()\n# Create cursor for database querying\nc = db.cursor()\n\noperons = open(\"operons.txt\").read().splitlines() # list of operons and genes\nproducts = open(\"gene_bnumber.txt\").read() # list of genes and there bnumber\n\nborders_fwd = {} # dictionary to contain forward strand genes on operon borders\nborders_rev = {} # dictionary to contain reverse strand genes on operon borders\nfor ls in [operons]:\n # loop through operons\n for i in range(0, len(ls)):\n op = ls[i].split(\"\\t\") \n # extract operon name and its genes\n name = op[0]; genes = op[1].split(\",\")\n # get genes on left and right border of operon\n gene_l = genes[0]; gene_r = genes[len(genes)-1]\n # search for bnumbers of border genes\n sl = re.search(str(gene_l)+\"\\t\"+\"(b[0-9]+)\", products)\n sr = re.search(str(gene_r)+\"\\t\"+\"(b[0-9]+)\", products)\n # if there bnumbers were found, find the gene id and strand \n if sl and sr: \n bnum_l = sl.group(1)\n bnum_r = sr.group(1)\n # query genes table for gene id and strand using bnumber\n sql = \"SELECT gene_id,strand FROM genes WHERE locus_tag='\" + bnum_l + \"'\"\n c.execute(sql)\n result_l = c.fetchone() \n sql = \"SELECT gene_id,strand FROM genes WHERE locus_tag='\" + bnum_r + \"'\"\n c.execute(sql)\n result_r = c.fetchone()\n # if gene id was found for both genes extract the info\n if result_l != None and result_r != None:\n id_l = int(result_l[0]); strand_l = result_l[1]\n id_r = int(result_r[0]); strand_r = result_r[1]\n # Find left position of left border gene from exon table using gene id\n sql = \"SELECT left_pos FROM exons WHERE gene_id=\" + str(id_l)\n c.execute(sql)\n left_pos = int(c.fetchone()[0])\n # Find right position of right border gene from exon table using gene id\n sql = \"SELECT right_pos FROM exons WHERE gene_id=\" + str(id_r)\n c.execute(sql)\n right_pos = int(c.fetchone()[0])\n # If genes are on forward strand add them to forward dictionary\n if strand_l == \"F\" and 
strand_l == strand_r:\n if borders_fwd.get(name) == None: \n borders_fwd[name] = (id_l, id_r, left_pos, right_pos)\n # If genes are on reverse strand add them to reverse dictionary\n elif strand_l == \"R\" and strand_l == strand_r:\n if borders_rev.get(name) == None:\n if len(genes) > 1: \n borders_rev[name] = (id_r, id_l, right_pos,left_pos)\n else:\n borders_rev[name] = (id_l, id_r, left_pos, right_pos)\n# Sort genes based on there left position\nsorted_fwd = sorted(borders_fwd.items(), key=lambda x: x[1][2])\nsorted_rev = sorted(borders_rev.items(), key=lambda x: x[1][2])\n# Open output file to write distances between operons\ndistances = open(\"operon_distances.txt\", \"w\")\n# Loop through operon pairs in forward strand\nfor i in range(0, len(sorted_fwd)-1):\n op1_name = sorted_fwd[i][0]; op2_name = sorted_fwd[i+1][0]\n right_op1 = sorted_fwd[i][1][3]; left_op2 = sorted_fwd[i+1][1][2]\n dist = left_op2-right_op1\n # write names of operon pair and distances between them to file\n distances.write(op1_name + \"\\t\" + op2_name+ \"\\t\" + \"+\" + \"\\t\" + str(dist)+ \"\\n\")\n# Loop through operon pairs in reverse strand\nfor i in range(0, len(sorted_rev)-1):\n op1_name = sorted_rev[i][0]; op2_name = sorted_rev[i+1][0]\n right_op1 = sorted_rev[i][1][3]; left_op2 = sorted_rev[i+1][1][2]\n dist = left_op2-right_op1\n # write names of operon pair and distances between them to file\n distances.write(op1_name + \"\\t\" + op2_name +\"\\t\" + \"-\" + \"\\t\" + str(dist)+ \"\\n\")\n" }, { "alpha_fraction": 0.5721107721328735, "alphanum_fraction": 0.593123197555542, "avg_line_length": 45.19117736816406, "blob_id": "42e440f4609cb5dceef9ac9457f2b81c7945b44e", "content_id": "e957c0f3537ff1441bb5309ea97ad12c5b7713e5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3141, "license_type": "no_license", "max_line_length": 85, "num_lines": 68, "path": "/calculate_intergenic_dist.py", "repo_name": "taylorcavazos/BIMM185", "src_encoding": "UTF-8", "text": "# Function that calculates intergenic distances between genes in an operon set\nimport sys\nimport MySQLdb\nimport re\n# Connect to sql database\ndb = MySQLdb.connect()\n# Create cursor to query tables\nc = db.cursor()\n# Open file for writing distances\nout = open(\"intergenic_distances.txt\", \"w\")\nout.write(\"geneid_1\\tgeneid_2\\tdistance\\n\")\n\noperons = open(\"operons.txt\").read().splitlines() # list of operons and genes\nproducts = open(\"gene_bnumber.txt\").read() # list of genes and bnumbers\n\ndistances = [] # list to track distances between two adjacent genes\n# Loop through all operons\nfor i in range(0, len(operons)):\n op = operons[i].split(\"\\t\")\n genes = op[1].split(\",\")\n # Only calculate distances if there is more than one gene in the operon\n if len(genes) > 1:\n # Loop through genes within operon\n for i in range(0, len(genes)-1):\n # search for bnumbers of two adjacent operons \n s1 = re.search(str(genes[i])+\"\\t\"+\"(b[0-9]+)\", products)\n s2 = re.search(str(genes[i+1])+\"\\t\"+\"(b[0-9]+)\", products)\n # if there bnumbers were found save them\n if s1 and s2:\n bnum_1 = s1.group(1)\n bnum_2 = s2.group(1)\n # query genes table and extract gene id and strand info for both genes\n sql = \"SELECT gene_id,strand FROM genes WHERE locus_tag='\" + bnum_1 + \"'\"\n c.execute(sql)\n result_1 = c.fetchone()\n sql = \"SELECT gene_id,strand FROM genes WHERE locus_tag='\" + bnum_2 + \"'\"\n c.execute(sql)\n result_2 = c.fetchone()\n # if genes were found in the database 
extract exon info\n if result_1 != None and result_2 != None:\n id_1 = int(result_1[0]); strand_1 = result_1[1]\n id_2 = int(result_2[0]); strand_2 = result_2[1]\n # if the genes are in the forward strand calculate the distance by\n # left position gene 2 minus right position gene 1\n if strand_1 == \"F\" and strand_1 == strand_2:\n # get right position of gene 1\n sql = \"SELECT right_pos FROM exons WHERE gene_id=\" + str(id_1)\n c.execute(sql)\n right = int(c.fetchone()[0])\n # get left position of gene 2\n sql = \"SELECT left_pos FROM exons WHERE gene_id=\" + str(id_2)\n c.execute(sql)\n left = int(c.fetchone()[0])\n # write gene pair and distance to out file\n out.write(str(id_1) + \"\\t\" + str(id_2) + \"\\t\" + str(left - right) + \"\\n\")\n # if the genes are in the reverse strand calculate the distance by\n # left position gene 1 minus right position gene 2\n elif strand_1 == \"R\" and strand_1 == strand_2:\n # get right position from gene 2\n sql = \"SELECT right_pos FROM exons WHERE gene_id=\" + str(id_2)\n c.execute(sql)\n right = int(c.fetchone()[0])\n # get left position from gene 1\n sql = \"SELECT left_pos FROM exons WHERE gene_id=\" + str(id_1)\n c.execute(sql)\n left = int(c.fetchone()[0])\n # write gene pair and distance to out file\n out.write(str(id_2) + \"\\t\" + str(id_1) + \"\\t\" + str(left-right) + \"\\n\")\n" }, { "alpha_fraction": 0.5952000021934509, "alphanum_fraction": 0.6047999858856201, "avg_line_length": 28.690475463867188, "blob_id": "0e1b3edd990506aa23f316db1f686c39d0d7f15b", "content_id": "540bc640c10b0f21fa129f5516033b3ba500e454", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1250, "license_type": "no_license", "max_line_length": 79, "num_lines": 42, "path": "/gnome2bnum_seq.py", "repo_name": "taylorcavazos/BIMM185", "src_encoding": "UTF-8", "text": "### Function that takes in a genome annotation in tabular format \n### and the genome sequences in fasta format\n### INPUTS: annotation table and whole genome\n### OUTPUTS: a file containing the b number and DNA sequence seperated by a tab\n \nimport sys \nimport csv\nimport textwrap\n\nDNA_dict = {'A': 'T', 'C': 'G', 'T':'A', 'G':'C'}\nfile_out = open('fasta_2.txt', 'w')\n\n### read in the table\ntable = sys.argv[1]\nwith open(table) as table:\n reader = csv.reader(table, delimiter=\"\\t\")\n table_list = list(reader)\n\n### read in the genome sequence\ngenome = sys.argv[2] \ngenome = open(genome,'r').read().splitlines()\ngenome_str = ''.join(genome[1:])\n\n### loop through genes\nfor i in range(1, len(table_list)-1):\n\tgene_id = table_list[i][7]\n\tstart = int(table_list[i][2])\n\tstop = int(table_list[i][3])\n\tstrand = table_list[i][4]\n\tfile_out.write(str(gene_id+ '\\t'))\n\tseq = genome_str[start-1:stop]\n\tif strand == \"+\":\n\t\tfile_out.write(seq)\n\t\tfile_out.write(\"\\n\")\n\t### if on reverse strand, find the reverse complement\n\telse: \n\t\trev_seq = seq[::-1]\n\t\trev_comp = ''\n\t\tfor r in rev_seq:\n\t\t\trev_comp = rev_comp + DNA_dict.get(r)\t\n\t\tfile_out.write(rev_comp)\n file_out.write('\\n')\t\t\t\n" }, { "alpha_fraction": 0.6113671064376831, "alphanum_fraction": 0.6298003196716309, "avg_line_length": 34.18918991088867, "blob_id": "a477d867ec8d40a81590db32b7aabf72ae3b0e8f", "content_id": "6a1cc027d4819359f90ad2001ae9480cdf8960cd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1302, "license_type": "no_license", "max_line_length": 109, "num_lines": 37, "path": 
"/group_protein_scores.py", "repo_name": "taylorcavazos/BIMM185", "src_encoding": "UTF-8", "text": "# Function that groups proteins that are related and sorts their scores in order from highest to lowest score\n# Also a file with scores for each protein in ascending order is outputted\n\nimport sys\nimport bz2\n\nprot_file = sys.argv[1]\nprot_file_unzip = bz2.BZ2File(prot_file)\nprot_file_read = prot_file_unzip.read().splitlines()\n\nprot_dict = {}\ncount = 0\nfor i in range(0,len(prot_file_read)):\n curr_protein = prot_file_read[i].split(\"\\t\")\n if len(prot_dict) > 2000:\n break\n elif prot_dict.get(curr_protein[0]) == None:\n prot_dict[curr_protein[0]] = [(curr_protein[1],curr_protein[3])]\n else:\n prot_dict[curr_protein[0]].append((curr_protein[1],curr_protein[3]))\n\n### Write dictionary to a file\noutput_file = open('/home/linux/ieng6/bm185s/tcavazos/my_files/prot_relations.txt', 'w')\nfor k, v in prot_dict.items():\n for val in sorted(v, key=lambda x:x[1], reverse=True) :\n output_file.write(str(k+'\\t'+val[0]+'\\t'+val[1]+'\\n'))\n\n#### find protein with max number of related proteins\n\nmax_score = float(\"-inf\")\nprot_max = None\nfor keys, values in prot_dict.items():\n if len(values) > max_score:\n max_score = len(values)\n prot_max = keys\nprint(prot_max)\nprint(max_score)\n" }, { "alpha_fraction": 0.6620395183563232, "alphanum_fraction": 0.6780565977096558, "avg_line_length": 37.95833206176758, "blob_id": "ace94dcb8a052a02e9ccde320b1b83a3a62f119e", "content_id": "f22007ec3e04c0b75b0e52c07b5bd72798f468d9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1873, "license_type": "no_license", "max_line_length": 139, "num_lines": 48, "path": "/GSEA_inputs.py", "repo_name": "taylorcavazos/BIMM185", "src_encoding": "UTF-8", "text": "# Function to convert matrix series files from NCBI GEO into GSE inputs\n# INPUT: NCBI GEO series matrix\n# OUTPUTS: (1) gene expression file (.txt) containing probes versus samples and expression values\n########## (2) phenotype (.cls) file\n# specifications for the above files can be found at (http://software.broadinstitute.org/cancer/software/gsea/wiki/index.php/Data_formats) \n\nimport sys\nimport re \n\n# read in series data\nseries = open(sys.argv[1]).read().splitlines() #input series matrix from GEO and read \nrows = []\n# find position in file where series matrix starts, ignore all data above this position\npos = 0\nfor line in series:\n if re.search(\"!Sample_source_name_ch1\", line):\n phenotypes = line.split(\"!Sample_source_name_ch1\\t\")[1].split(\"\\t\")\n if re.search(\"!series_matrix_table_begin\", line):\n break\n pos = pos+1 \nfor i in range(pos+1, len(series)-1):\n rows.append(series[i])\n# extract the number of samples from dataset\nnum_samples = len(rows[0].split(\"\\t\")[1:])\n\n# output phenotype .cls file\nphen_file = open(\"phenotypes.cls\", \"w\")\nphen_file.write(str(num_samples) + \"\\t2\\t1\\n\")\nphen_file.write(\"# MET PRIMARY\\n\")\n# write phenotype for each sample (0 for metastatic 1 for primary)\nfor p in range(0, len(phenotypes)-1):\n if phenotypes[p] == '\"brain metastasis\"':\n phen_file.write(\"0 \")\n elif phenotypes[p] == '\"primary breast tumor\"':\n phen_file.write(\"1 \")\nif phenotypes[p+1] == '\"brain metastasis\"': phen_file.write(\"0\\n\")\nelse: phen_file.write(\"1\\n\")\n\n# output expression data\nexpr = open(\"gene_expression.txt\", \"w\") \nheader = rows[0].split(\"\\t\")\nexpr.write(\"NAME\\tDESCRIPTION\\t\")\nfor i in range(1, 
len(header)-1):\n expr.write(str(eval(header[i])) + \"\\t\")\nexpr.write(str(eval(header[len(header)-1])) + \"\\n\")\nfor i in range(1, len(rows)):\n r = rows[i].split(\"\\t\")\n expr.write(str(eval(r[0])) + \"\\tna\\t\" + \"\\t\".join(r[1:]) + \"\\n\") \n\n\n" }, { "alpha_fraction": 0.6059337854385376, "alphanum_fraction": 0.6222898364067078, "avg_line_length": 46.79999923706055, "blob_id": "77376a1cddeefcb36c4583c841f4deffbb83008f", "content_id": "007bb87ff5070735e9c2636d1c51e3858537330f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2629, "license_type": "no_license", "max_line_length": 198, "num_lines": 55, "path": "/parse_genebank.py", "repo_name": "taylorcavazos/BIMM185", "src_encoding": "UTF-8", "text": "### Function that uses biopython to parse a GenBank file to extract useful information\n### INPUT: GenBank file\n### OUTPUT: a tab seperated file conaining info such as accession, coordinates, strand, gene name, and more. \n\nfrom Bio import SeqIO # importing biopython\nimport gzip # importing library to read zipped files\n\n# Open an output file for writing\nout = open(\"GCF_000005845.2_ASM584v2_genomic_features.txt\", \"w\")\n# Writing the header to the file\nout.write(\"Accession\" + \"\\t\"+\"Coordinates\"+ \"\\t\"+ \"Strand\"+\"\\t\"+\"Gene Name\"+ \"\\t\"+\"Locus Tag\"+\"\\t\"+\"Synonyms\"+ \"\\t\"+\"Protein Name\"+\"\\t\"+\"Tax ID\"+\"\\t\"+\"EC-number(s)\"+\"\\t\"+\"External reference\"+ \"\\n\" )\n\n# Opening the zipped GenBank file using gzip\nwith gzip.open(\"E_coli_K12_MG1655/GCF_000005845.2_ASM584v2_genomic.gbff.gz\", \"rt\") as f:\n gb_record = SeqIO.read( f, \"gb\") # obtaining info from genbank file\n source = gb_record.features[0] # getting source tax ID from file\n tax_ID = source.qualifiers[\"db_xref\"][0].split(\":\")[1]\n # Looping through features in genebank file \n for feat in gb_record.features:\n if feat.type == 'CDS': #keep features if they are CDS\n # output protein id to file if protein coding gene and psuedo if not for pseudogene\n if feat.qualifiers.get('protein_id') != None:\n out.write(feat.qualifiers['protein_id'][0]+ \"\\t\")\n else:\n out.write(\"pseudo\"+ \"\\n\")\n # if there are multiple coordinates for the CDS, loop through them and separate by commas\n for l in feat.location.parts:\n out.write(str(l.start) + \":\" + str(l.end) + \",\")\n out.write(\"\\t\")\n # write what strand the gene is on\n out.write(str(feat.location.strand) + \"\\t\")\n # write gene name for feature\n out.write(feat.qualifiers[\"gene\"][0] + \"\\t\")\n # write locus tag for feature\n out.write(feat.qualifiers[\"locus_tag\"][0] + \"\\t\")\n # write synonyms for feature\n out.write(feat.qualifiers[\"gene_synonym\"][0] + \"\\t\")\n \n # if there is product information, output it and if not output \"-\"\n if feat.qualifiers.get(\"product\") != None:\n out.write(feat.qualifiers[\"product\"][0] + \"\\t\")\n else:\n out.write(\"-\" + \"\\t\")\n # write the tax ID obtained from the source\n out.write(tax_ID + \"\\t\")\n # write all EC numbers\n if feat.qualifiers.get(\"EC_number\") != None:\n for e in feat.qualifiers[\"EC_number\"]:\n out.write(e + \",\")\n out.write(\"\\t\")\n else:\n out.write(\"-\" + \"\\t\")\n # write the external reference to the file\n out.write(feat.qualifiers[\"db_xref\"][0] + \"\\t\")\n out.write(\"\\n\")\n" }, { "alpha_fraction": 0.6821607947349548, "alphanum_fraction": 0.7248743772506714, "avg_line_length": 48.75, "blob_id": "d88981ceef4434851ecc4b7b60a9d16fd5e362d8", "content_id": 
"358511ac95e1eee5e0e7a5e673093f44a0b6ef12", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 796, "license_type": "no_license", "max_line_length": 113, "num_lines": 16, "path": "/parse_fasta.py", "repo_name": "taylorcavazos/BIMM185", "src_encoding": "UTF-8", "text": "### Function that parses a fasta file \n### INPUT: Fasta file\n### OUTPUT: Accession and sequence of every protein\n\nfrom Bio import SeqIO # importing SeqIO from biopython to parse file\nimport gzip # importing gzip to read zipped file\n\n# opening output file to write to and writing the header\nout = open(\"GCF_000005845.2_ASM584v2_protein_features\", \"w\")\nout.write(\"Accession\"+ \"\\t\"+\"Protein Sequence\"+ \"\\n\")\n\nwith gzip.open(\"E_coli_K12_MG1655/GCF_000005845.2_ASM584v2_protein.faa.gz\",\"rt\") as f:\n fasta = SeqIO.parse(f, \"fasta\") # parsing file\n for prot in fasta: # looking through every line that was parsed\n accession, sequence = prot.id, str(prot.seq) # save accession and sequence for every protein using id and seq\n out.write(accession + \"\\t\"+ sequence + \"\\n\") # writing output to file\n" }, { "alpha_fraction": 0.6735074520111084, "alphanum_fraction": 0.683768630027771, "avg_line_length": 34.70000076293945, "blob_id": "8096196adbce058c1d6e7101fa2340e94618df04", "content_id": "f39c7006165caae243ac714462ee8c1620015fa4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1072, "license_type": "no_license", "max_line_length": 73, "num_lines": 30, "path": "/calculate_CUI.py", "repo_name": "taylorcavazos/BIMM185", "src_encoding": "UTF-8", "text": "### This function calculates the CUI for each gene \n### INPUT: a file containing the bnumber, codon count, and length of gene\n### OUTPUT: a file with bnumber and CUI for each gene\n\nimport sys\n\n### Read in input file\ncodon_count = sys.argv[1]\ncodon_count = open(codon_count).read().splitlines()\n\n# Open an output file\nout = open(\"gene_CUI.txt\", \"w\")\n\n# Extract the total counts row from the file \ntotal_row = codon_count[len(codon_count)-1].split('\\t')\ntotal_codons_genome = int(total_row[len(total_row)-1])\n\n### Calculate the CUI for each gene \nfor i in range(1, len(codon_count)-1):\n\tline_split = codon_count[i].split('\\t')\n\tgene_name = line_split[0]\n\tgene_length = int(line_split[len(line_split)-1])\n\tCUI = 0 #variable that will sum the CUI for each codon\n\tfor j in range(1,len(line_split)-1):\n\t\t#q_c is the relative frequency of codon c in gene i\n\t\tq_c = float(line_split[j])/(gene_length/3) \n\t\t#p_c is the probability of codon c in the genome\n\t\tp_c = float(total_row[j])/total_codons_genome\n\t\tCUI = CUI + float(q_c*p_c)\n\tout.write(gene_name + \"\\t\" + str(CUI) + \"\\n\")\n\n" }, { "alpha_fraction": 0.5871559381484985, "alphanum_fraction": 0.6001529097557068, "avg_line_length": 29.34883689880371, "blob_id": "e662392fcb5c65cdedcf8ddb9ab05ab46805b7d1", "content_id": "5e174460eb5932a3e4f5cf6ba0cd8e8ad1bf51c5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1308, "license_type": "no_license", "max_line_length": 65, "num_lines": 43, "path": "/gnome2fasta.py", "repo_name": "taylorcavazos/BIMM185", "src_encoding": "UTF-8", "text": "### Function that takes in a genome annotation in tabular format \n### and the genome sequences in fasta format\n### Output: a fasta file with genes and there sequences\n \nimport sys \nimport csv\nimport textwrap\n\nDNA_dict = 
{'A': 'T', 'C': 'G', 'T':'A', 'G':'C'}\nfile_out = open('fasta.txt', 'w')\n\n### read in the table\ntable = sys.argv[1]\nwith open(table) as table:\n reader = csv.reader(table, delimiter=\"\\t\")\n table_list = list(reader)\n\n### read in the genome sequence\ngenome = sys.argv[2] \ngenome = open(genome,'r').read().splitlines()\ngenome_str = ''.join(genome[1:])\n\n### loop through genes\nfor i in range(1, len(table_list)-1):\n\tprotein = table_list[i][8]\n\tlocus = table_list[i][6]\n\tgene_id = table_list[i][7]\n\tstart = int(table_list[i][2])\n\tstop = int(table_list[i][3])\n\tstrand = table_list[i][4]\n\tfile_out.write(str('>'+protein+'|'+locus+'|'+gene_id+ '\\n'))\n\tseq = genome_str[start-1:stop]\n\tif strand == \"+\":\n\t\tfile_out.write(textwrap.fill(seq, width=70))\n\t\tfile_out.write('\\n')\n\t### if on reverse strand, find the reverse complement\n\telse: \n\t\trev_seq = seq[::-1]\n\t\trev_comp = ''\n\t\tfor r in rev_seq:\n\t\t\trev_comp = rev_comp + DNA_dict.get(r)\t\n\t\tfile_out.write(textwrap.fill(rev_comp, width=70))\n file_out.write('\\n')\t\t\t\n" }, { "alpha_fraction": 0.7177700400352478, "alphanum_fraction": 0.7177700400352478, "avg_line_length": 44.31578826904297, "blob_id": "862ba1534fa5684a267c823cf685ec7e6f1228c2", "content_id": "6c05aafdedc34b9a6c1a3ee402f64496a7e88b09", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 861, "license_type": "no_license", "max_line_length": 103, "num_lines": 19, "path": "/parse_uniprot.py", "repo_name": "taylorcavazos/BIMM185", "src_encoding": "UTF-8", "text": "### Function that parses a UniProt file and outputs the tax id, organism, and taxonomy for every record\nfrom Bio import SwissProt\nimport gzip\nfrom collections import OrderedDict\n# opening output file for writing\nout = open(\"uniprot_sprot_archaea_features.txt\", \"w\")\n# maintain dict to hold lines so no duplicates are written to out file\nlines_dict = OrderedDict()\n#unzip uniprot file and use swissprot to parse file\nwith gzip.open(\"uniprot_sprot_archaea.dat.gz\",\"rt\") as f:\n for record in SwissProt.parse(f): # parsing file\n key = \",\".join(record.taxonomy_id)\n value = str(record.organism) + \"\\t\" + \";\".join(record.organism_classification) + \"\\n\"\n if lines_dict.get(key) == None:\n lines_dict[key] = value\n# write uniq dict items to the file\nout.write(\"NCBI Tax ID\\tOrganism\\tTaxomony\\n\")\nfor k, v in lines_dict.items():\n out.write(k + \"\\t\" + v)\n" }, { "alpha_fraction": 0.6514343023300171, "alphanum_fraction": 0.6761174201965332, "avg_line_length": 31.586956024169922, "blob_id": "7b707e4436fe9bf5af376bb69cea47629b67226b", "content_id": "6d768b25542a0879de66150f74bf158159745b45", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 2998, "license_type": "no_license", "max_line_length": 80, "num_lines": 92, "path": "/tables.sql", "repo_name": "taylorcavazos/BIMM185", "src_encoding": "UTF-8", "text": "/* This file consists of the framework to create\n** the following tables:\n** genomes, replicons, genes, exons,synonyms, external references, and functions\n** \n** This file can either be directly ran in mySQL or individual table structures\n** can be copied and executed alone. 
\n*/\n\n/* This table contains genome information such as id, name, size, ext\n** genome_id: an automatic key used to link tables together\n*/\nCREATE TABLE genomes (\n genome_id INT(10) UNSIGNED NOT NULL,\n name VARCHAR(256) NOT NULL,\n tax_id INT(10) UNSIGNED NOT NULL,\n domain ENUM('bacteria','archaea','eukarya') NOT NULL,\n num_replicons SMALLINT(5) UNSIGNED NOT NULL,\n num_genes INT(10) UNSIGNED NOT NULL,\n size_bp BIGINT(15) UNSIGNED NOT NULL,\n assembly VARCHAR(25) NOT NULL,\n PRIMARY KEY (genome_id),\n KEY tax_id (tax_id)\n) ENGINE=InnoDB;\n\n/* This table contains the different replicons per genome\n** replicons are linked to genomes using the genome id\n*/\nCREATE TABLE replicons (\n replicon_id INT(10) UNSIGNED NOT NULL,\n genome_id INT(10) UNSIGNED NOT NULL,\n name VARCHAR(256) NOT NULL,\n type ENUM('chromosome','plasmid') NOT NULL,\n shape ENUM('circular','linear') NOT NULL,\n num_genes INT(10) UNSIGNED NOT NULL,\n size_bp BIGINT(15) UNSIGNED NOT NULL,\n accession VARCHAR(25) NOT NULL,\n release_date VARCHAR(25) NOT NULL,\n PRIMARY KEY (replicon_id),\n KEY(genome_id)\n) ENGINE=InnoDB;\n\n/* The genes table stores information about each gene in a genome*/\nCREATE TABLE genes (\n gene_id INT(10) UNSIGNED NOT NULL,\n genome_id INT(10) UNSIGNED NOT NULL,\n replicon_id INT(10) UNSIGNED NOT NULL,\n locus_tag CHAR(25) NOT NULL,\n protein_id CHAR(25) NOT NULL,\n name CHAR(10) NOT NULL,\n strand ENUM('F','R') NOT NULL,\n num_exons SMALLINT(5) UNSIGNED NOT NULL,\n length MEDIUMINT(7) UNSIGNED NOT NULL,\n product VARCHAR(1024) NOT NULL,\n PRIMARY KEY (gene_id),\n KEY (genome_id),\n KEY (replicon_id),\n KEY (locus_tag),\n KEY (protein_id)\n) ENGINE=InnoDB;\n\n/* The exon table holds information about each exon in a genome*/\nCREATE TABLE exons(\n gene_id INT (10) UNSIGNED NOT NULL,\n exon VARCHAR (100) NOT NULL,\n left_pos INT (10) UNSIGNED NOT NULL,\n right_pos INT (10) UNSIGNED NOT NULL,\n length INT (10) UNSIGNED NOT NULL,\n KEY (gene_id)\n) ENGINE=InnoDB;\n\n/* This table holds synonyms for each gene*/\nCREATE TABLE gene_synonyms (\n gene_id INT (10) UNSIGNED NOT NULL,\n synonyms VARCHAR (100) NOT NULL,\n KEY (gene_id) \n) ENGINE=InnoDB;\n\n/* This table holds the external references for each gene*/\nCREATE TABLE gene_xrefs (\n gene_id INT(10) UNSIGNED NOT NULL,\n xdb VARCHAR(32) NOT NULL,\n xid VARCHAR(24) NOT NULL,\n KEY (gene_id),\n KEY (xid)\n) ENGINE=InnoDB;\n\n/* This table holds the functions for each gene*/\nCREATE TABLE functions(\n gene_id INT (10) UNSIGNED NOT NULL,\n function VARCHAR (100) NOT NULL,\n KEY (gene_id)\n) ENGINE=InnoDB;\n" } ]
16
carlosbarreton/BarretoCarlos_Ejercicio21
https://github.com/carlosbarreton/BarretoCarlos_Ejercicio21
a0eb5560642de2fa595d2c7abc6136bfcf84e088
647f7b910501ade3c33c7122ab016c71a11d5024
27e5a6f5efcd914c81fa0dcb087bb3006ff21230
refs/heads/master
2020-08-24T18:23:56.945432
2019-10-22T18:39:36
2019-10-22T18:39:36
216880976
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.664893627166748, "alphanum_fraction": 0.6914893388748169, "avg_line_length": 16.18181800842285, "blob_id": "21d205967ee11e13e36968c39efb7acbc8c96d33", "content_id": "228c0d6e59854db3a1499665d25effcb8412f8c5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 188, "license_type": "no_license", "max_line_length": 31, "num_lines": 11, "path": "/grafica.py", "repo_name": "carlosbarreton/BarretoCarlos_Ejercicio21", "src_encoding": "UTF-8", "text": "import numpy as np\nimport matplotlib.pyplot as plt\n\nx = np.linspace(0,2*np.pi,100)\ny = np.cos(x)\n\nplt.figure()\nplt.xlabel(\"x\")\nplt.ylabel(\"cos(x)\")\nplt.plot(x,y)\nplt.savefig(\"grafica.png\")" } ]
1
Blacktrooth/User-signup
https://github.com/Blacktrooth/User-signup
6ef12ee3f10e49bee94c840e35602e87745f229e
9e8f002cd8e31cda41a8a80f17a9bbd90ecc0cb0
207ca3a612b7e1b433c42fdee25adbb4b4013d5b
refs/heads/master
2020-05-15T08:56:05.491394
2019-04-28T02:04:31
2019-04-28T02:04:31
182,168,385
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5239320397377014, "alphanum_fraction": 0.5316520929336548, "avg_line_length": 25.2702693939209, "blob_id": "ccfced2e904c195d1ea62cf167a9a77366848214", "content_id": "96624bfddf28f2a4a48964521d0b521311206ef3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1943, "license_type": "no_license", "max_line_length": 106, "num_lines": 74, "path": "/main.py", "repo_name": "Blacktrooth/User-signup", "src_encoding": "UTF-8", "text": "from flask import Flask, render_template, request, redirect\nimport cgi\n\napp = Flask(__name__)\napp.config['DEBUG'] = True\n\ndef ver_email(email):\n if email != '':\n atsign = 0\n period = 0\n for char in email:\n if char == '@':\n atsign += 1\n elif char == '.':\n period += 1\n if ((20 < len(email) or len(email) < 3) or (atsign != 1) or (period != 1) or (\" \" in email)):\n return False\n else:\n return True\n else:\n return True\n\n@app.route('/signup', methods=['POST'])\ndef signup():\n username = request.form['username']\n password = request.form['password']\n ver_pass = request.form['ver_pass']\n email = request.form['email']\n Error = False\n error = {\n 'user' : \"\",\n 'pass':\"\",\n 'match':\"\",\n 'email':\"\"\n }\n\n if (username == \"\" or \" \" in username or 3 > len(username) or len(username) > 20):\n error['user'] = \"That's not a valid username\"\n Error = True\n pass\n else:\n pass\n\n if (password == \"\" or \" \" in password or 20 < len(password) or len(password) < 3):\n error['pass'] = \"That's not a valid password\"\n Error = True\n pass\n else:\n pass\n\n if (ver_pass != password or ver_pass == \"\"):\n error['match'] = \"Passwords don't match\"\n Error = True\n pass\n else:\n pass \n\n if (ver_email(email) != True):\n error['email'] = \"That's not a valid email\"\n Error = True\n pass\n else:\n pass\n\n if (Error):\n return render_template('/signup.html', username=username, error=error)\n else:\n return render_template('/welcome.html', username=username, email=email)\n@app.route('/')\ndef index():\n compound_error = request.args.get('error')\n return render_template('signup.html', error=compound_error and cgi.escape(compound_error, quote=True))\n\napp.run()" } ]
1
KyleGrace/animal-game
https://github.com/KyleGrace/animal-game
3be9ff6053d52bf73f15eb573439173bc04ab06d
baf33ce8a9c7ac947c1429af86f3b4dd73c8dd81
6abca8ea5eba773f925b5d2be44927d4bdffb26d
refs/heads/main
2023-04-24T12:22:52.860973
2021-05-16T02:33:21
2021-05-16T02:33:21
358081591
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6481252312660217, "alphanum_fraction": 0.6580139994621277, "avg_line_length": 23.755102157592773, "blob_id": "b46cb6fd28e95c92757391777a91b74331ba67e7", "content_id": "5482d63dd7e3e739b8f4d00ee87e9e43722c1729", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2427, "license_type": "no_license", "max_line_length": 82, "num_lines": 98, "path": "/web-app/user.py", "repo_name": "KyleGrace/animal-game", "src_encoding": "UTF-8", "text": "import random\nfrom flask import session\nfrom prompts import *\n\nimport time\n\ndef initializeSession():\n session.clear()\n # must be ~<20 or the cookies grow too large,\n # requires rework of prompts storage in cookies to grow bigger than 20\n numdays = 10 # Update this to determine how many prompts they must pass to win\n session['animal_metric'] = 10\n session['human_metric'] = 10\n\n prompts = starterPrompts\n\n # Shortening it to 11 prompts to help the cookies stay small\n smallerPrompts = random.sample(list(prompts),(numdays+1))\n newprompts = {}\n for prompt in smallerPrompts:\n newprompts[prompt] = starterPrompts[prompt]\n\n prompts=newprompts\n\n keys = prompts.keys()\n game_prompt = random.choice(list(keys))\n image = prompts[game_prompt][2]\n session['game_prompt'] = game_prompt\n session['prompts'] = prompts\n session['image'] = image\n session['remain'] = numdays\n\n return session\n\n\n\ndef yesSession():\n game_prompt = session['game_prompt']\n prompts = session['prompts']\n\n yesChange = prompts[game_prompt][0]\n image = prompts[game_prompt][2]\n animal_change, human_change = yesChange\n\n session['animal_metric'] += animal_change\n session['human_metric'] += human_change\n session['remain'] = session['remain'] - 1\n\n prompts.pop(game_prompt)\n\n if not prompts:\n return \n\n keys = prompts.keys()\n newprompt = random.choice(list(keys))\n image = prompts[newprompt][2]\n \n session['image'] = image\n session['game_prompt'] = newprompt\n session['prompts'] = prompts\n\n session.modified = True\n\ndef noSession():\n game_prompt = session['game_prompt']\n prompts = session['prompts']\n\n noChange = prompts[game_prompt][1]\n animal_change, human_change = noChange\n\n session['animal_metric'] += animal_change\n session['human_metric'] += human_change\n session['remain'] = session['remain'] - 1\n\n prompts.pop(game_prompt)\n\n if not prompts:\n return \n\n keys = prompts.keys()\n newprompt = random.choice(list(keys))\n image = prompts[newprompt][2]\n\n session['image'] = image\n session['game_prompt'] = newprompt\n session['prompts'] = prompts\n\n session.modified = True\n\n\ndef hasLostAnimal():\n return int(session['animal_metric'])<=0\n\ndef hasLostHuman():\n return int(session['human_metric'])<=0\n\ndef isEmpty():\n return (session['remain']==0) or (not session['prompts'])\n\n" }, { "alpha_fraction": 0.509829044342041, "alphanum_fraction": 0.5219017267227173, "avg_line_length": 66.34532165527344, "blob_id": "74dd505230834e2833dce8c7b89eb8e52e4bf188", "content_id": "e2b6c55e94251885f2ea75826975b3be110fb1bf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9363, "license_type": "no_license", "max_line_length": 139, "num_lines": 139, "path": "/web-app/prompts.py", "repo_name": "KyleGrace/animal-game", "src_encoding": "UTF-8", "text": "####################\n# PROMPTS FOR GAME #\n####################\nimport random\n\nstarterPrompts = { # Yes: Effect on Animals, Humans; No: Effect on Animals, Humans\n \"\"\"The 
population is growing! To expand\n your kingdom and provide resources to\n feed everyone, your advisor has suggested\n to clear out a forest to make land for\n agriculture. Accept?\"\"\": [(-3, 2), (1, -4),'/static/reeds.png'],\n \"\"\"Medical technology is expanding. A representative\n from the science society, Darles Charwin, has\n asked for your permission to begin the testing\n and development of a new research project. Grant\n permission?\"\"\": [(-1, 5), (0, -5),'/static/darwin.jpg'],\n \"\"\"You've been visited by the evil twin of the\n artist Catherine Chalmer, who detests\n all insects and wants to use ddt pesticide\n to remove pests so that farmers\n will be happier. Allow ddt pesticide?\"\"\": [(-5, 3), (0, -3),'/static/ants.jpeg'],\n \"\"\"Space-travel technology is being developed,\n and to test out your kingdom's newest\n rocket, your advisors have suggested to\n send a single chimp into space! It'll\n be good for the press! Do you accept?\"\"\": [(-1, 5), (0, -1),'/static/ham.jpg'],\n \"\"\"The neighboring kingdom's shaman, Carcus\n Moates, visits you and asks you to take a trip\n with him into the Underworld. You're curious\n about the animal spirits that live there,\n but you're also concerned about your public\n image and looking like an idiot. Take a trip\n into the Underworld?\"\"\": [(2, -1), (0, 0),'/static/coates.png'],\n \"\"\"Your pet elephant has died naturally of old\n age. Now there's a dead elephant in your room,\n and artist Hamien Dirst wants to keep it for\n an art project. Allow Dirst to keep the\n elephant?\"\"\": [(random.randint(0,5), random.randint(-5,5)), (0, 0),'/static/hirst.jpg'],\n \"\"\"You're visited by a mysterious creature named\n Helena, who offers you a blender with a live\n goldfish in it. Turn on the blender?\"\"\": [(0, -1), (0, 0),'/static/helena.jpg'],\n \"\"\"Your foreign embassador has informed you of the\n new 'life release' trend that's viral across\n Japan! He's super excited to release\n animals, and has asked for your support.\n Allow your kingdom to practice\n life-releasing?\"\"\": [(-3, 2), (0, -1),'/static/release.png'],\n \"\"\"There's a elephant artist that has been gaining\n popularity. Would you like to recognize their\n work by admitting them to the national\n art collection?\"\"\": [(2, 0), (-1, 1),'/static/suda.jpg'],\n \"\"\"You biggest energy company, PB, accidentally\n spilled oil into the ocean. Your coastal\n advisor Bacqueline Jishop suggests a\n million dollar initiative to clean up\n the oil spill. Would you like to\n raise the taxes for this intiative?\"\"\": [(1, -6), (-5, 0),'/static/bishop.jpg'],\n \"\"\"One of your advisors, Udge Ferica, wants to\n start a nutrition program to encourage vegan\n meals in schools. Many parents are against this,\n but Udge believes its a good idea. Would you\n like to provide funding for Udge's program?\"\"\": [(6, -3), (-1, 1),'/static/meat.jpg'],\n \"\"\"The Earl of Claremont, Ill Banthes, has proposed\n animal studies as a new primary school subject. \n Will you implement this new subject?\"\"\": [(3,0),(-1,0),'/static/bill.png'],\n \"\"\"You have been visited by Hilliam Wogarth. He has\n recently published a paper arguing that violence\n towards animals conditions humans towards\n violence. He argues that you make animal\n cruelty punishable by life in prison. 
Will you\n do so?\"\"\": [(5,2),(-2,-1),'/static/perfection.jpg'],\n \"\"\"A small cat has snuck into your chamber, will you\n let them stay?\"\"\": [(0,0),(0,0),'/static/maiercat.jpg'],\n \"\"\"Izzy, representative of the feline district, has\n come asking for support in providing more tuna\n to their community. Will you provide the tuna?\"\"\":[(3,-1),(-2,0),'/static/standizzy.jpeg'],\n \"\"\"Your space agency has plans to build a large \n telescope in the center of the national forest.\n The telescope will be used to search for aliens.\n Ned Chiang has approached you in opposition of \n this project. He believes it will destroy animal\n habitats and widen the divide between species.\n Will you cancel the project?\"\"\": [(4,-2),(-4,3),'/static/telescope.jpg'],\n \"\"\"Bnæbjörnsdóttir and Milson have approached you \n as representatives of the Old York Animal\n Advocacy Board. They propose new legislation\n requiring all house pets to have their own \n dedicated space within a home. Will you pass\n this legislation?\"\"\": [(4,-3),(-1,0),'/static/worlding.png'],\n \"\"\"You meet with your friend from secondary \n school, Bron Roglio, for coffee. In passing he \n mentions that you should bring on an animal \n advisor. Will you do so?\"\"\": [(3,-1),(-3,0),'/static/broglio.png'],\n \"\"\"Renowned artist Batherine Chell has asked to\n perform at the royal ball next month. She says\n she will need 100 squids for the performance. \n Will you schedule her to perform?\"\"\": [(3,-5),(0,1),'/static/bell.png'],\n \"\"\"Logistics advisor, Kucy Limbell, enters your \n chamber with a box of rats. She introduces them\n as REA, the Rat Empowered Assistant. She suggests\n you allow REA to help you with all kinds of\n things, from creating palace artwork to \n designing the streets of new cities. Will you \n employ the rats?\"\"\": [(2,-5),(-1,3),'/static/rats.png'],\n \"\"\"While hunting rabbits, a royal hunter has brought\n a painting they found in a patch of tall grass.\n You cannot make out the inscription on the back\n other than G__rg_ _tub_s. You like it. Will you\n hang it in your palace?\"\"\": [(1,1),(-1,-1),'/static/lionhorse.jpg'],\n \"\"\"The Reverend of Nano Bio Info Cogno has come to \n you demanding that their religion become officially\n recognized by the state. Will you make a public\n announcement of your support?\"\"\": [(-3,-2),(2,2),'/static/bio.png'],\n \"\"\"The previous monarch banned the use of animal fur\n for clothing purposes, citing that it symbolized\n human dominion over other animals. Udge Ferica,\n your advisor, has suggested you ban fake fur as \n well for the same reasons. Will you ban \n fake fur as well?\"\"\": [(4,-1),(-4,2),'/static/maierfoxscarf.jpg'],\n \"\"\"Gryle Kace has submitted a proposal to create\n a state health insurance program for all animal\n residents. To fund it, he suggests you raise\n taxes on the nobles. Will you support this \n legislation?\"\"\": [(4,-2),(-2,2),'/static/maierdog.jpg'],\n \"\"\"National scientists have discovered a cave beneath\n the site of your new summer home. They say there's\n cave art! Will you demolish the cave to continue \n building your home?\"\"\": [(-3,-2),(2,1),'/static/banksycave.jpg'],\n \"\"\"Your advisor informs you that his best friend has\n come to see you. He walks in with a bright red\n suit. 'You must increase the supply of bones to \n the canine district!!' he cries. 
Will you do\n so?\"\"\": [(2,-2),(-2,1),'/static/wegmandog.jpg'],\n \"\"\"A movement has been gaining popularity calling\n for the atonement of atrocities commited towards \n animals over prior centuries. Will you financially\n support this movement by raising taxes to provide\n better animal benefits?\"\"\": [(4,-3),(-3,0),'/static/banksydog.jpg'],\n}" }, { "alpha_fraction": 0.589612603187561, "alphanum_fraction": 0.5947211384773254, "avg_line_length": 22.5, "blob_id": "8826b05a0e80206e2eee4c581b3b9e7fadcb5b16", "content_id": "1cb488b7bc8d13112887d4f523c0a4c34c529f8c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2349, "license_type": "no_license", "max_line_length": 86, "num_lines": 100, "path": "/web-app/main.py", "repo_name": "KyleGrace/animal-game", "src_encoding": "UTF-8", "text": "from flask import Flask, render_template, jsonify, request, session, redirect, url_for\nimport random,secrets\nimport json\nfrom user import *\n\napp = Flask(__name__,static_url_path='/static')\n\n# app.secret_key = secrets.token_bytes(32)\napp.secret_key = \"EASYKEY\"\n\n#########\n# Pages #\n#########\n\n@app.route(\"/\")\ndef index():\n resp = render_template('landing.html')\n return resp\n\n@app.route(\"/game\")\ndef game():\n #initializes session\n initializeSession()\n\n resp = render_template(\"game.html\")\n return resp\n\n@app.route(\"/about\")\ndef about():\n resp = render_template(\"about.html\")\n return resp\n\n@app.route(\"/lostanimal\")\ndef loser_animal():\n resp = render_template(\"lostAnimal.html\")\n return resp\n\n@app.route(\"/losthuman\")\ndef loser_human():\n resp = render_template(\"lostHuman.html\")\n return resp\n\n@app.route(\"/win\")\ndef open_end():\n resp = render_template(\"open-ended.html\")\n return resp\n\n@app.route(\"/winner\")\ndef winner():\n resp = render_template(\"win.html\")\n return resp\n\nfrom prompts import *\n@app.route(\"/gallery\")\ndef gallery():\n resp = render_template(\"gallery.html\", starterPrompts=starterPrompts)\n return resp\n\n###########\n# Methods #\n###########\n\n@app.route(\"/yes\", methods=[\"POST\"])\ndef optionyes():\n yesSession()\n\n if hasLostAnimal():\n return redirect(url_for('loser_animal'))\n elif hasLostHuman():\n return redirect(url_for('loser_human'))\n elif isEmpty():\n return redirect(url_for('winner'))\n\n response_string = (str(session['animal_metric']) + \"#\" \n + str(session['human_metric']) + \"#\" \n + session['game_prompt'] + \"#\" \n + session['image'])\n return response_string\n\n@app.route(\"/no\", methods=[\"POST\"])\ndef optionno():\n noSession()\n\n if hasLostAnimal():\n return redirect(url_for('loser_animal'))\n elif hasLostHuman():\n return redirect(url_for('loser_human'))\n elif isEmpty():\n return redirect(url_for('winner'))\n\n response_string = (str(session['animal_metric']) + \"#\" \n + str(session['human_metric']) + \"#\" \n + session['game_prompt'] + \"#\" \n + session['image'])\n return response_string\n \n\n# # Used to test locally\nif __name__ == \"__main__\":\n app.run(host=\"127.0.0.1\",port=8080,debug=True)" }, { "alpha_fraction": 0.7644710540771484, "alphanum_fraction": 0.7704590559005737, "avg_line_length": 61.625, "blob_id": "c743c8850a5d8cd94b4d036d56dfa13e266ba509", "content_id": "e99eba042253c64780b2db1d71ffbf947391188e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 501, "license_type": "no_license", "max_line_length": 304, "num_lines": 8, "path": 
"/web-app/AreasOfImprovement.md", "repo_name": "KyleGrace/animal-game", "src_encoding": "UTF-8", "text": "## Known issues\n\n* Cookies grow too large after about 26 prompts so we randomly sample down to numdays + 1 (days required to win). If the game needs to be run longer, we must rework prompts to being based off of ideas so they can be moved out of cookies. Requiring users to only store a list of ID ints in their session. \n\n## QOL Improvements\n\n* Add titles and authors to image with each prompt.\n* Add conditional prompts, only added into the pool once certain prompts have been accepted or declined. " }, { "alpha_fraction": 0.47731396555900574, "alphanum_fraction": 0.5130066275596619, "avg_line_length": 24.045454025268555, "blob_id": "d145a4466a4ffa873070b451efe8749061880e4b", "content_id": "7c1de3777d26fafb71d333171e9c3ed8fd6c1454", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1653, "license_type": "no_license", "max_line_length": 135, "num_lines": 66, "path": "/console-game.py", "repo_name": "KyleGrace/animal-game", "src_encoding": "UTF-8", "text": "import time\nimport sys\nimport random\n\n#########################\n# Helper Functions #\n#########################\n\n\ndef delay_print(s,delay):\n for c in s:\n sys.stdout.write(c)\n sys.stdout.flush()\n time.sleep(delay)\n print()\n\ndef addMetrics(currTup, choiceTup):\n return (currTup[0]+choiceTup[0],\n currTup[1]+choiceTup[1],\n currTup[2]+choiceTup[2],\n currTup[3]+choiceTup[3])\n\ndef win():\n delay_print(\"\\nYou have won...\",0.3)\n\ndef lose():\n delay_print(\"\\nYou have lost...\",0.3)\n\n# Structured as\n# OPTION : (Animal, Wealth, Peace, Humans)\nprompts = {\n \"Kill all dogs?\" : (-10,2,-1,-1),\n \"Kill all cats?\" : (-10,2,-1,-1),\n \"Give subsidy for owning pets?\" : (5,-2,0,1),\n}\n\nmetrics = (10,10,10,10)\nmaxMetrics = (20,20,20,20)\n\nplaying = True\ndays = 0\nwhile(playing):\n days += 1\n print(f'|------------------ day {days} ---------------------|')\n print(f'Current Levels:\\nAnimals {metrics[0]} Wealth {metrics[1]} Peace {metrics[2]} Humans {metrics[3]}\\n')\n\n if not prompts:\n win()\n playing = False\n break\n keys = prompts.keys()\n currKey = random.choice(list(keys))\n delay_print(currKey,0.1)\n \n choice = input(\"Yes or No? \")\n\n if (choice[0].lower()) == 'y':\n metrics = addMetrics(metrics,prompts[currKey])\n\n for met in metrics:\n if met <= 0:\n delay_print(f'\\nLevels:\\nAnimals {metrics[0]} Wealth {metrics[1]} Peace {metrics[2]} Humans {metrics[3]}\\n',0.05)\n lose()\n playing=False\n \n del prompts[currKey]\n" }, { "alpha_fraction": 0.7963351011276245, "alphanum_fraction": 0.7994764447212219, "avg_line_length": 105.11111450195312, "blob_id": "35d3cfba6828f530f3754d20df622336b043454e", "content_id": "33a57390dd5fe2f19473618022fab6f8e33ecf1f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1910, "license_type": "no_license", "max_line_length": 823, "num_lines": 18, "path": "/README.md", "repo_name": "KyleGrace/animal-game", "src_encoding": "UTF-8", "text": "# WELCOME TO KINGDOM\n\nInspired by Pitzer's Art and Animals course, we created a project to explore the relationship between human and non-human animals. At the time of this writing, this game currently lives at https://kingdom-310822.wl.r.appspot.com.\n\n## Project Statement\n\nIn the intermingled worlds of human and non-human animals, we live our day-to-day lives. 
Each decision we make, no matter how big or how small, impact not only human communities, but various animal communities as well. Often, actions that benefit one community may end up hurting the other. Can humans and animals peacefully cohabitate?\n\nThis project was created with the intention to explore the many different ways in which human decisions impact animal lives. One of our main goals was to revisit the important ideas from various authors, artists, and philosophers that we have read about throughout this course, and consider how all these ideas can be tied together when thinking about the dynamic between human and animal.\n\nWe wanted to put the player in the shoes of a leader who must make significant choices, and allow them to see how socio political decisions may impact the human and animal communities. One of our challenges was encoding the complicated dynamics of the community into two binaries: human vs nonhuman. In addition, we wanted the game to reflect the actual impact on communities, not the human sentiment. For example, in the artwork Helena by Marco Evaristti, one goldfish was killed by an audience participant. This lead to many claims of animal cruelty. However, compared to the amount of animal product in traditional artworks via paints, brushes, and mediums, the overall production of Helena had a negligible effect on the animal community in general. In general, we wanted our game to capture the realistic impact on communities.\n\n_No animals were harmed in the making of this project._ \n\n \n \nKyle Grace \nElissa Hou\n" }, { "alpha_fraction": 0.7327249050140381, "alphanum_fraction": 0.7731420993804932, "avg_line_length": 50.13333511352539, "blob_id": "393f449c0a2fd927f4bd686d2e26c65146dc1cd1", "content_id": "7d02705879c23d5a4e582427a513c1c86f146885", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 767, "license_type": "no_license", "max_line_length": 122, "num_lines": 15, "path": "/web-app/images.py", "repo_name": "KyleGrace/animal-game", "src_encoding": "UTF-8", "text": "images = [\n \n]\n\n\n# Location of Images not from class:\n# Darwin : http://t2.gstatic.com/licensed-image?q=tbn:ANd9GcQcDj27aQRKsSZEQljgWP04xvPtL3RO4rQQxW1zb6ms6PSu27AbZ459tWWG-uB3\n# Telescope : https://www.sciencemag.org/news/2021/01/how-famed-arecibo-telescope-fell-and-how-it-might-rise-again\n# Bell : http://gamma.library.temple.edu/sciencemeetsart/items/show/6\n# rats : http://www.lucykimbell.com/stuff/Kimbell_rats_Tamara_public.pdf\n# Broglio : https://english.asu.edu/content/ron-broglio\n# Ants : https://www.catherinechalmers.com/#/test-idol-offering/\n# Helena : http://challengersofart.blogspot.com/2014/07/marco-evaristti-helena.html\n# Suda : https://www.youtube.com/watch?v=2jKWQKrnvIc\n# Bishop : http://arthurrogergallery.com/exhibition/jacqueline-bishop/\n" } ]
7
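A minimal sketch of the yes/no metric mechanic implemented by console-game.py and the session logic in the record above; the helper names (apply_choice, has_lost) and the second prompt are illustrative assumptions, not identifiers from the repository:

# Each prompt maps to a delta tuple; answering "yes" applies the deltas
# (mirroring addMetrics() above) and the game is lost as soon as any
# metric drops to zero, won when the prompt pool is exhausted.
import random

def apply_choice(metrics, deltas):
    # Element-wise addition over the metric tuple.
    return tuple(m + d for m, d in zip(metrics, deltas))

def has_lost(metrics):
    return any(m <= 0 for m in metrics)

prompts = {
    "Give subsidy for owning pets?": (5, -2, 0, 1),   # (animals, wealth, peace, humans)
    "Ban factory farming?": (8, -4, -1, 0),           # illustrative prompt, not from the repo
}

metrics = (10, 10, 10, 10)
while prompts and not has_lost(metrics):
    prompt = random.choice(list(prompts))
    if input(prompt + " (y/n) ").lower().startswith("y"):
        metrics = apply_choice(metrics, prompts.pop(prompt))
    else:
        prompts.pop(prompt)  # declining still removes the prompt from the pool
print("won" if not has_lost(metrics) else "lost", metrics)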
krouser/cfripper
https://github.com/krouser/cfripper
44ed79d80f836bbc903413308564a6619909d0cd
5d64c1a54d777c7c12e5adbd56428e87e2ad79b3
d7204ef8f848ac28b6338743bc0b3016a22e8115
refs/heads/master
2022-12-28T13:42:57.518045
2020-10-01T08:26:26
2020-10-01T08:26:26
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.646733820438385, "alphanum_fraction": 0.646733820438385, "avg_line_length": 44.25, "blob_id": "594153be031dddf6d7fafc876c09ec3c9d2524ef", "content_id": "a1a74906f9cd39e1f4835c3fb0c315561d54131d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3077, "license_type": "permissive", "max_line_length": 117, "num_lines": 68, "path": "/cfripper/rules/sqs_queue_policy.py", "repo_name": "krouser/cfripper", "src_encoding": "UTF-8", "text": "__all__ = [\"SQSQueuePolicyNotPrincipalRule\", \"SQSQueuePolicyPublicRule\"]\n\nimport logging\nfrom typing import Dict, Optional\n\nfrom pycfmodel.model.cf_model import CFModel\nfrom pycfmodel.model.resources.sqs_queue_policy import SQSQueuePolicy\n\nfrom cfripper.config.regex import REGEX_HAS_STAR_OR_STAR_AFTER_COLON\nfrom cfripper.model.enums import RuleGranularity, RuleRisk\nfrom cfripper.model.result import Result\nfrom cfripper.rules.base_rules import Rule\n\nlogger = logging.getLogger(__file__)\n\n\nclass SQSQueuePolicyNotPrincipalRule(Rule):\n \"\"\"\n Checks if an SQS Queue policy has an Allow + a NotPrincipal.\n\n Risk:\n AWS **strongly** recommends against using `NotPrincipal` in the same policy statement as `\"Effect\": \"Allow\"`.\n Doing so grants the permissions specified in the policy statement to all principals except the one named\n in the `NotPrincipal` element. By doing this, you might grant access to anonymous (unauthenticated) users.\n \"\"\"\n\n GRANULARITY = RuleGranularity.RESOURCE\n REASON = \"SQS Queue {} policy should not allow Allow and NotPrincipal at the same time\"\n\n def invoke(self, cfmodel: CFModel, extras: Optional[Dict] = None) -> Result:\n result = Result()\n for logical_id, resource in cfmodel.Resources.items():\n if isinstance(resource, SQSQueuePolicy):\n for statement in resource.Properties.PolicyDocument._statement_as_list():\n if statement.NotPrincipal:\n self.add_failure_to_result(result, self.REASON.format(logical_id), resource_ids={logical_id})\n return result\n\n\nclass SQSQueuePolicyPublicRule(Rule):\n \"\"\"\n Checks for wildcard principals in Allow statements in an SQS Queue Policy.\n\n Risk:\n This is deemed a potential security risk as anyone would be able to interact with your queue.\n \"\"\"\n\n REASON = \"SQS Queue policy {} should not be public\"\n RISK_VALUE = RuleRisk.HIGH\n\n def invoke(self, cfmodel: CFModel, extras: Optional[Dict] = None) -> Result:\n result = Result()\n for logical_id, resource in cfmodel.Resources.items():\n if isinstance(resource, SQSQueuePolicy) and resource.Properties.PolicyDocument.allowed_principals_with(\n REGEX_HAS_STAR_OR_STAR_AFTER_COLON\n ):\n for statement in resource.Properties.PolicyDocument._statement_as_list():\n if statement.Effect == \"Allow\" and statement.principals_with(REGEX_HAS_STAR_OR_STAR_AFTER_COLON):\n if statement.Condition and statement.Condition.dict():\n logger.warning(\n f\"Not adding {type(self).__name__} failure in {logical_id} \"\n f\"because there are conditions: {statement.Condition}\"\n )\n else:\n self.add_failure_to_result(\n result, self.REASON.format(logical_id), resource_ids={logical_id}\n )\n return result\n" }, { "alpha_fraction": 0.711922824382782, "alphanum_fraction": 0.711922824382782, "avg_line_length": 42.969696044921875, "blob_id": "ee61037de16bf3af91dafae9c83713e4620695b2", "content_id": "c84057495c36187be70af318daf57b5d154429c0", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 1451, "license_type": "permissive", "max_line_length": 117, "num_lines": 33, "path": "/cfripper/rules/sns_topic_policy_not_principal.py", "repo_name": "krouser/cfripper", "src_encoding": "UTF-8", "text": "__all__ = [\"SNSTopicPolicyNotPrincipalRule\"]\n\nfrom typing import Dict, Optional\n\nfrom pycfmodel.model.cf_model import CFModel\nfrom pycfmodel.model.resources.sns_topic_policy import SNSTopicPolicy\n\nfrom cfripper.model.enums import RuleGranularity\nfrom cfripper.model.result import Result\nfrom cfripper.rules.base_rules import Rule\n\n\nclass SNSTopicPolicyNotPrincipalRule(Rule):\n \"\"\"\n Checks if an SNS topic policy has an Allow + a NotPrincipal.\n\n Risk:\n AWS **strongly** recommends against using `NotPrincipal` in the same policy statement as `\"Effect\": \"Allow\"`.\n Doing so grants the permissions specified in the policy statement to all principals except the one named\n in the `NotPrincipal` element. By doing this, you might grant access to anonymous (unauthenticated) users.\n \"\"\"\n\n GRANULARITY = RuleGranularity.RESOURCE\n REASON = \"SNS Topic {} policy should not allow Allow and NotPrincipal at the same time\"\n\n def invoke(self, cfmodel: CFModel, extras: Optional[Dict] = None) -> Result:\n result = Result()\n for logical_id, resource in cfmodel.Resources.items():\n if isinstance(resource, SNSTopicPolicy):\n for statement in resource.Properties.PolicyDocument._statement_as_list():\n if statement.NotPrincipal:\n self.add_failure_to_result(result, self.REASON.format(logical_id), resource_ids={logical_id})\n return result\n" }, { "alpha_fraction": 0.7607192397117615, "alphanum_fraction": 0.7662517428398132, "avg_line_length": 35.150001525878906, "blob_id": "43e1c8f14f556df23e1f3c5f4f4d0214ae212894", "content_id": "d7dfed8d5da9be1b32d8609040fdd45696ce0c82", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 723, "license_type": "permissive", "max_line_length": 100, "num_lines": 20, "path": "/tests/rules/test_PrivilegeEscalationRule.py", "repo_name": "krouser/cfripper", "src_encoding": "UTF-8", "text": "import pytest\n\nfrom cfripper.rules.privilege_escalation import PrivilegeEscalationRule\nfrom tests.utils import get_cfmodel_from\n\n\n@pytest.fixture()\ndef valid_role_inline_policy():\n return get_cfmodel_from(\"rules/PrivilegeEscalationRule/valid_role_inline_policy.json\").resolve()\n\n\ndef test_valid_role_inline_policy(valid_role_inline_policy):\n rule = PrivilegeEscalationRule(None)\n result = rule.invoke(valid_role_inline_policy)\n\n assert not result.valid\n assert len(result.failed_rules) == 1\n assert len(result.failed_monitored_rules) == 0\n assert result.failed_rules[0].rule == \"PrivilegeEscalationRule\"\n assert result.failed_rules[0].reason == \"PolicyA has blacklisted IAM action iam:createpolicy\"\n" }, { "alpha_fraction": 0.6488011479377747, "alphanum_fraction": 0.6497414112091064, "avg_line_length": 37.672725677490234, "blob_id": "ae7b4b9de77e7cc5a39fe87ab77df5fa5a846a4a", "content_id": "442104546ce5e63644aebe9733cc23cb16651c55", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2127, "license_type": "permissive", "max_line_length": 137, "num_lines": 55, "path": "/cfripper/rules/privilege_escalation.py", "repo_name": "krouser/cfripper", "src_encoding": "UTF-8", "text": "__all__ = [\"PrivilegeEscalationRule\"]\n\nfrom typing import Dict, Optional\n\nfrom pycfmodel.model.cf_model import CFModel\nfrom 
pycfmodel.model.resources.iam_policy import IAMPolicy\n\nfrom cfripper.model.enums import RuleGranularity\nfrom cfripper.model.result import Result\nfrom cfripper.rules.base_rules import Rule\n\n\nclass PrivilegeEscalationRule(Rule):\n \"\"\"\n Checks for any dangerous IAM actions that could allow privilege escalation and potentially\n represent a large security risk.\n See [current blacklisted IAM actions](https://github.com/Skyscanner/cfripper/blob/master/cfripper/rules/privilege_escalation.py#L29).\n\n Fix:\n Unless strictly necessary, do not use actions in the IAM action blacklist. CloudFormation files that do require these\n actions should be added to the whitelist.\n \"\"\"\n\n GRANULARITY = RuleGranularity.RESOURCE\n REASON = \"{} has blacklisted IAM action {}\"\n IAM_BLACKLIST = set(\n action.lower()\n for action in [\n \"iam:CreateAccessKey\",\n \"iam:CreateLoginProfile\",\n \"iam:UpdateLoginProfile\",\n \"iam:AttachUserPolicy\",\n \"iam:AttachGroupPolicy\",\n \"iam:AttachRolePolicy\",\n \"iam:PutUserPolicy\",\n \"iam:PutGroupPolicy\",\n \"iam:PutRolePolicy\",\n \"iam:CreatePolicy\",\n \"iam:AddUserToGroup\",\n \"iam:UpdateAssumeRolePolicy\",\n \"iam:CreatePolicyVersion\",\n \"iam:SetDefaultPolicyVersion\",\n ]\n )\n\n def invoke(self, cfmodel: CFModel, extras: Optional[Dict] = None) -> Result:\n result = Result()\n for logical_id, resource in cfmodel.Resources.items():\n if isinstance(resource, IAMPolicy):\n policy_actions = set(action.lower() for action in resource.Properties.PolicyDocument.get_iam_actions())\n for violation in policy_actions.intersection(self.IAM_BLACKLIST):\n self.add_failure_to_result(\n result, self.REASON.format(logical_id, violation), resource_ids={logical_id}\n )\n return result\n" } ]
4
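A minimal sketch of the Allow-plus-NotPrincipal check that SQSQueuePolicyNotPrincipalRule and SNSTopicPolicyNotPrincipalRule above perform, written against a plain-dict policy document instead of pycfmodel types (a simplifying assumption for brevity):

# Flags policy statements that combine "Effect": "Allow" with NotPrincipal,
# the pattern both rules above warn about: it grants the stated permissions
# to everyone except the named principal, including anonymous users.
def find_allow_not_principal(policy_document):
    statements = policy_document.get("Statement", [])
    if isinstance(statements, dict):   # a single statement may be a bare dict
        statements = [statements]
    return [
        s for s in statements
        if s.get("Effect") == "Allow" and "NotPrincipal" in s
    ]

policy = {
    "Statement": [
        {"Effect": "Allow",
         "NotPrincipal": {"AWS": "arn:aws:iam::123456789012:root"},
         "Action": "sqs:SendMessage", "Resource": "*"},
    ]
}
assert find_allow_not_principal(policy)  # this statement should be flagged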
IbnuAhsani/semantic-web
https://github.com/IbnuAhsani/semantic-web
bcb386addd9423fee4d0fc4d5668d380056a9469
1e6d8e1ea80f325f1f6a1c45e98ac495c664c70a
f8f851dbe8a73c7eb483b0ebec5940208b229357
refs/heads/master
2020-05-19T16:41:43.004463
2019-05-13T01:34:53
2019-05-13T01:34:53
185,116,855
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.621052622795105, "alphanum_fraction": 0.6631578803062439, "avg_line_length": 16.600000381469727, "blob_id": "4be00253c379b1cb72f8d3d304dde6cab9f32cf3", "content_id": "f0ae39b1994f8a29d68c94e9fee2f8dd3e8a264d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 95, "license_type": "no_license", "max_line_length": 46, "num_lines": 5, "path": "/sparql/examples/ex340.ru", "repo_name": "IbnuAhsani/semantic-web", "src_encoding": "UTF-8", "text": "# filename: ex340.ru\r\n\r\nPREFIX d: <http://learningsparql.com/ns/data#>\r\n\r\nCREATE GRAPH d:g3\r\n\r\n" }, { "alpha_fraction": 0.5164835453033447, "alphanum_fraction": 0.6593406796455383, "avg_line_length": 28.33333396911621, "blob_id": "d0052164c15dea22404df2dd370d11757e9004dd", "content_id": "59571e552e4f124cef6eb2e5f0d458d68a06d92e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 91, "license_type": "no_license", "max_line_length": 65, "num_lines": 3, "path": "/sparql/examples/ex551.ru", "repo_name": "IbnuAhsani/semantic-web", "src_encoding": "UTF-8", "text": "# filename: ex551.ru\r\n\r\nDELETE WHERE { <http://www.worldcat.org/isbn/0062515861> ?p ?o }\r\n" }, { "alpha_fraction": 0.6178175806999207, "alphanum_fraction": 0.6341592073440552, "avg_line_length": 24.605634689331055, "blob_id": "e191d860e2a713410ec9757e59828c0d7f53e0dd", "content_id": "db047ea483a586d51bb6c6a6af588733017268fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1897, "license_type": "no_license", "max_line_length": 73, "num_lines": 71, "path": "/sparql/examples/ex364-cgi.txt", "repo_name": "IbnuAhsani/semantic-web", "src_encoding": "UTF-8", "text": "#!/usr/local/bin/python\r\n# filename: ex364.cgi\r\n# CGI version of ex363.py\r\n\r\nimport sys\r\nsys.path.append('/usr/home/bobd/lib/python/') # needed for hosted version\r\nfrom SPARQLWrapper import SPARQLWrapper, JSON\r\nimport cgi\r\n\r\nform = cgi.FieldStorage() \r\ndirector1 = form.getvalue('dir1')\r\ndirector2 = form.getvalue('dir2')\r\n\r\nsparql = SPARQLWrapper(\"http://data.linkedmdb.org/sparql\")\r\nqueryString = \"\"\"\r\nPREFIX m: <http://data.linkedmdb.org/resource/movie/>\r\nPREFIX foaf: <http://xmlns.com/foaf/0.1/>\r\n\r\nSELECT DISTINCT ?actorName ?freebaseURI WHERE {\r\n\r\n ?dir1 m:director_name \"DIR1-NAME\" .\r\n ?dir2 m:director_name \"DIR2-NAME\" .\r\n\r\n ?dir1film m:director ?dir1 ;\r\n m:actor ?actor .\r\n\r\n ?dir2film m:director ?dir2 ;\r\n m:actor ?actor .\r\n\r\n ?actor m:actor_name ?actorName ;\r\n foaf:page ?freebaseURI . 
\r\n}\r\n\"\"\"\r\n\r\nqueryString = queryString.replace(\"DIR1-NAME\",director1)\r\nqueryString = queryString.replace(\"DIR2-NAME\",director2)\r\nsparql.setQuery(queryString)\r\n\r\nsparql.setReturnFormat(JSON)\r\n\r\ntry:\r\n results = sparql.query().convert()\r\n requestGood = True\r\nexcept Exception, e:\r\n results = str(e)\r\n requestGood = False\r\n\r\nprint \"\"\"Content-type: text/html\r\n\r\n<html><head><title>results</title>\r\n<style type=\"text/css\"> * { font-family: arial,helvetica}</style>\r\n</head><body>\r\n\"\"\"\r\n\r\nif requestGood == False:\r\n print \"<h1>Problem communicating with the server</h1>\"\r\n print \"<p>\" + results + \"</p>\"\r\nelif (len(results[\"results\"][\"bindings\"]) == 0):\r\n print \"<p>No results found.</p>\"\r\n\r\nelse:\r\n\r\n print \"<h1>Actors directed by both \" + director1 + \\\r\n \" and \" + director2 + \"</h1>\"\r\n\r\n for result in results[\"results\"][\"bindings\"]:\r\n actorName = result[\"actorName\"][\"value\"]\r\n freebaseURI = result[\"freebaseURI\"][\"value\"]\r\n print \"<p><a href=\\\"\" + freebaseURI + \"\\\">\" + actorName + \"</p>\"\r\n\r\nprint \"</body></html>\" \r\n\r\n\r\n" } ]
3
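The ex364 CGI above is Python 2 (`except Exception, e`, print statements); a minimal Python 3 sketch of the same SPARQLWrapper pattern, trimmed to a single lookup against the endpoint used in that example (query and director name are illustrative):

# Python 3 version of the SPARQLWrapper usage shown in ex364-cgi.txt above.
from SPARQLWrapper import SPARQLWrapper, JSON

sparql = SPARQLWrapper("http://data.linkedmdb.org/sparql")
sparql.setQuery("""
PREFIX m: <http://data.linkedmdb.org/resource/movie/>
SELECT ?film WHERE { ?film m:director_name "Sofia Coppola" } LIMIT 5
""")
sparql.setReturnFormat(JSON)

try:
    results = sparql.query().convert()   # Python 3 spelling: "except ... as e"
    for row in results["results"]["bindings"]:
        print(row["film"]["value"])
except Exception as e:
    print("Problem communicating with the server:", e)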
sumodgeorge/syft
https://github.com/sumodgeorge/syft
def3e4335ca47108569b078aa568c19c2df92b3c
0ed30138c4c5313ce7b5610842172b8f601a9947
7adac45f9db64b69a6235b9f02a67a340d28be9b
refs/heads/main
2023-01-14T15:50:34.093176
2020-11-19T17:38:20
2020-11-19T17:38:20
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7960829734802246, "alphanum_fraction": 0.804147481918335, "avg_line_length": 95.55555725097656, "blob_id": "be2f94b3923088bd4cf5ba4959dd71ae2ffc48b9", "content_id": "c853322b85dbba5e3f02ec06364a1be34b7aa02c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 868, "license_type": "permissive", "max_line_length": 270, "num_lines": 9, "path": "/schema/README.md", "repo_name": "sumodgeorge/syft", "src_encoding": "UTF-8", "text": "## Updating the JSON schema\nToday the JSON schema is generated from integration test data. Specifically, when integration tests are run, the `/schema/json/examples` directory is populated with syft JSON output data. This examples directory is used to drive automatically generating the JSON schema.\nThe caveats with this approach is:\n1) the JSON schema is only as good as the examples provided\n2) there is an integration test that ensures that the JSON schema is valid relative to what the code currently generates.\nThis means to update the JSON schema you need to\n1) Open up `test/integration/json_schema_test.go` and comment out invocations of the `validateAgainstV1Schema` function.\n2) From the root of the repo run `generate-json-schema`. Now there should be a new schema generated at `/schema/json/schema.json`\n3) Uncomment the `validateAgainstV1Schema` function." }, { "alpha_fraction": 0.6181555986404419, "alphanum_fraction": 0.621037483215332, "avg_line_length": 22.16666603088379, "blob_id": "0d8ca89394b3e7b5ddf00ac9858dc2ac1a0bc4a7", "content_id": "d7f8ad0b3ec22608ecf8f801752fefe7f30cffe3", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 694, "license_type": "permissive", "max_line_length": 68, "num_lines": 30, "path": "/schema/json/generate.py", "repo_name": "sumodgeorge/syft", "src_encoding": "UTF-8", "text": "#!/usr/env/bin python3\nimport os\nimport glob\nimport json\n\nfrom genson import SchemaBuilder\n\nEXAMPLES_DIR = \"examples/\"\nOUTPUT = \"schema.json\"\n\n\ndef main():\n builder = SchemaBuilder()\n\n print(\"Generating new Syft json schema...\")\n for filepath in glob.glob(os.path.join(EXAMPLES_DIR, '*.json')):\n with open(filepath, 'r') as f:\n print(f\" adding {filepath}\")\n builder.add_object(json.loads(f.read()))\n\n print(\"Building schema...\")\n new_schema = builder.to_schema()\n with open(OUTPUT, 'w') as f:\n f.write(json.dumps(new_schema, sort_keys=True, indent=4))\n\n print(f\"New schema written to '{OUTPUT}'\")\n\n\nif __name__ == \"__main__\":\n main()" } ]
2
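A minimal sketch of the validation step the schema README above alludes to — checking one Syft JSON document against the genson-generated schema — using the Python jsonschema package; the file paths and example filename are assumptions:

# Complements generate.py above: after genson builds schema.json from the
# examples directory, any example document should validate against it.
import json
from jsonschema import validate, ValidationError

with open("schema/json/schema.json") as f:
    schema = json.load(f)
with open("schema/json/examples/example.json") as f:   # assumed example path
    document = json.load(f)

try:
    validate(instance=document, schema=schema)
    print("document conforms to the schema")
except ValidationError as err:
    print("schema violation:", err.message)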
brunaeduarda/ProjetoAED
https://github.com/brunaeduarda/ProjetoAED
6b4ed9aed1f308c73d8df0751dafb7effda575bb
0619e3866c3c725842cbb774755b5ce3dc3edce7
a6849e3c6f068270ed094c64fc723b6a22475106
refs/heads/main
2023-03-03T13:14:07.116910
2021-02-16T02:03:20
2021-02-16T02:03:20
339,260,050
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5697753429412842, "alphanum_fraction": 0.6065350770950317, "avg_line_length": 20.984375, "blob_id": "6577492767dda927f1ff869dcad0b923890a7132", "content_id": "a417610470fba8e2ad343c13028b32b6652f454b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1482, "license_type": "no_license", "max_line_length": 117, "num_lines": 64, "path": "/estrutura.py", "repo_name": "brunaeduarda/ProjetoAED", "src_encoding": "UTF-8", "text": "from Class.BaseContatos import Base\r\n\r\n'''\r\n1. Cada contato deve possuir os itens:\r\n1.1. um Código (obrigatório) ok\r\n1.2. Nome (obrigatório) ok\r\n1.3. Telefone (obrigatório) ok\r\n1.4. e-mail (obrigatório) ok\r\n1.5. Lista de interesses (#Item) \r\n1.6. Lista de contatos.\r\n\r\n2. Funçõesque o sistema deve possuir:\r\n2.1. Salvar os contatos em Arquivo (por ordem alfabética)\r\n2.2. Buscar contatos por nome ou e-mail ok\r\n2.3. Buscar contatos por item de interesse.\r\n2.4. Organizar contatos por \"contatos em comum\"\r\n (ex.: pega dois contatos e mostra os contatos em comum na rede de contatos)\r\n2.5. Mostrar contatos por algum outro critérioescolhido pelo candidato, entre as Informações do contato.\r\n\r\n3. Obrigatório o uso de estruturas de dados mostrados na disciplina\r\n3.1 Listas ligadas (fila, lista, pilha); (CADASTRO)\r\n3.2 Árvores;\r\n3.3 Grafos.\r\n'''\r\n\r\n\r\n\r\n\r\n'''\r\nMenu\r\n\r\n1- Cadastro ok\r\n2- excluir ok\r\n3- exibir contatos ok\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n'''\r\n\r\n\r\n\r\nnome = 'victor'\r\ntel = '99159407'\r\nemail = 'victor@email.com'\r\ncodigo = 1\r\n\r\nnome2 = 'teste'\r\ntel2 = 'teste'\r\nemail2 = 'teste@email.com'\r\n\r\nbase = Base()\r\nbase.cadastrar(nome,tel,email, 1)\r\nbase.cadastrar(nome,tel,email, 2)\r\nbase.cadastrar(nome,tel,email, 3)\r\nbase.cadastrar(nome2,tel2,email2, 2)\r\nbase.exibirtodos()\r\nprint('...')\r\nbase.excluir(3)\r\nbase.exibirtodos()" }, { "alpha_fraction": 0.678260862827301, "alphanum_fraction": 0.678260862827301, "avg_line_length": 20.322580337524414, "blob_id": "b3b0c231e170f9e570b80810e278e5193a0d1da2", "content_id": "085e66d611b20c7b1b39e9c02a220b1956316e29", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 691, "license_type": "no_license", "max_line_length": 46, "num_lines": 31, "path": "/Menu.py", "repo_name": "brunaeduarda/ProjetoAED", "src_encoding": "UTF-8", "text": "'''\r\n\r\n-Inserir contato ok\r\n---interesses\r\n-----cadastro de interesses ok\r\n-----lista de interesses ok\r\n-----inserir interesse ao contato\r\n-----remover interesse do contato\r\n-Lista de contatos ok\r\n-Buscar contatos\r\n---Nome ok\r\n---Email ok\r\n---Código ok\r\n---Listar por interesse\r\n---Lista de contatos em comum\r\n-Editar Contatos\r\n---Nome ok\r\n---Telefone ok\r\n---Email ok\r\n---Lista de contatos no contato\r\n-----adicionar contato\r\n-----remover contato\r\n-----ver contatos\r\n---Lista de interesse\r\n-----Ver todos os interesses do contato\r\n-----listar todos os interesses cadastrados ok\r\n-----cadastrar novo interesse ok\r\n-----adicionar interesse ao contato\r\n-----Remover interesse do contato\r\n\r\n'''" }, { "alpha_fraction": 0.4910791218280792, "alphanum_fraction": 0.4957548975944519, "avg_line_length": 31.85416603088379, "blob_id": "65c7f28ff8d67ad800774152a9294b8cd9a9f3c6", "content_id": "6f678935577a6e3c86cc07acd4e68f1005473b1d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 
8128, "license_type": "no_license", "max_line_length": 126, "num_lines": 240, "path": "/ProjetoAED (1)/Class/BaseContatos.py", "repo_name": "brunaeduarda/ProjetoAED", "src_encoding": "UTF-8", "text": "from Class.contato import Contato\r\nfrom Class.Interesse import Interesse\r\n\r\nclass Base:\r\n def __init__(self):\r\n self.inicio = None\r\n self.fim = None\r\n self.tamanho = 0\r\n self.qinteresses = 0\r\n\r\n\r\n def cadastrar(self, nome, tel, email, codigo):\r\n if (codigo != None):\r\n Novo = Contato(codigo)\r\n else:\r\n codigo = self.tamanho+1\r\n Novo = Contato(codigo)\r\n Novo.set_nome(nome)\r\n Novo.set_telefone(tel)\r\n Novo.set_email(email)\r\n if (self.tamanho <= 0):\r\n self.inicio = self.fim = Novo #Se o tamanho for 0 independente do tamanho inserido, sera cadasatrado como primeiro\r\n else:\r\n if (codigo <= 1): #inserir inicio\r\n self.inicio.set_anterior(Novo)\r\n Novo.set_proximo(self.inicio)\r\n self.inicio = Novo\r\n if (codigo >= self.tamanho): #inserir fim\r\n self.fim.set_proximo(Novo)\r\n Novo.set_anterior(self.fim)\r\n self.fim = Novo\r\n else: #inserir em qualquer posicao\r\n antecessor = sucessor = self.inicio\r\n while (antecessor.get_codigo() != codigo-1):\r\n antecessor = antecessor.get_proximo()\r\n sucessor = antecessor.get_proximo()\r\n Novo.set_proximo(sucessor)\r\n Novo.set_anterior(antecessor)\r\n sucessor.set_anterior(Novo)\r\n antecessor.set_proximo(Novo)\r\n antecessor = sucessor = None\r\n self.organizarcodigos()\r\n self.tamanho += 1\r\n\r\n def is_empty(self):\r\n if (self.tamanho == 0): return True\r\n return False\r\n\r\n def procurarnome (self, nome): #pesquisa nome\r\n temp = self.inicio\r\n if (self.is_empty()):\r\n return False\r\n while(temp != None):\r\n if (indice > self.tamanho): return False\r\n if (temp.nome == nome): return temp\r\n temp = temp.get_proximo()\r\n return False\r\n\r\n def procuraremail (self, email): #pesquisa nome\r\n temp = self.inicio\r\n if (self.is_empty()):\r\n return False\r\n while(temp != None):\r\n if (indice > self.tamanho): return False\r\n if (temp.email == email): return temp\r\n temp = temp.get_proximo()\r\n return False\r\n\r\n def procurarcodigo (self, codigo):#Procurar Codigo\r\n if (self.is_empty()):\r\n return False\r\n if (codigo >= self.tamanho):\r\n return self.fim\r\n if (codigo <= 1):\r\n return self.inicio\r\n else:\r\n temp = self.inicio\r\n while(codigo != temp.get_codigo()):\r\n temp = temp.get_proximo()\r\n return temp\r\n\r\n def printcontato (self, contato):\r\n print(\"Codigo: {}\".format(contato.get_codigo()))\r\n print(\"Nome: {}\".format(contato.get_nome()))\r\n print(\"Email: {}\".format(contato.get_email()))\r\n print(\"Telefone: {}\".format(contato.get_telefone()))\r\n\r\n def organizarcodigos (self):\r\n temp = self.inicio\r\n codigo = 1\r\n if (self.is_empty()):\r\n return False\r\n while (temp != None):\r\n temp.set_codigo(codigo)\r\n codigo += 1\r\n temp = temp.get_proximo()\r\n return True\r\n\r\n def exibirtodos (self):\r\n if (self.is_empty()):\r\n print('Lista vazia')\r\n temp = self.inicio\r\n while(temp != None):\r\n self.printcontato(temp)\r\n temp = temp.get_proximo()\r\n\r\n def excluir (self, codigo):\r\n if (self.is_empty()):\r\n return False\r\n if (codigo <= 1):\r\n if(self.tamanho==1):\r\n self.inicio = self.fim = None\r\n self.tamanho -=1\r\n return True\r\n temp = self.inicio.get_proximo()\r\n temp.set_anterior(None)\r\n self.inicio.set_proximo(None)\r\n self.inicio = temp\r\n if (codigo >= self.tamanho):\r\n if (self.tamanho==1):\r\n self.excluir(1)\r\n return 
True\r\n temp = self.fim.get_anterior()\r\n temp.set_proximo(None)\r\n self.fim.set_anterior(None)\r\n self.fim = temp\r\n else:\r\n temp = self.procurarcodigo(codigo)\r\n temp2 = temp.get_anterior()\r\n temp2.set_proximo(temp.get_proximo())\r\n temp2 = temp.get_proximo()\r\n temp2.set_anterior(temp.get_anterior())\r\n temp = None\r\n self.organizarcodigos()\r\n self.tamanho -= 1\r\n\r\n def editarnome (self, codigo, nome):\r\n if (self.is_empty()):\r\n return False\r\n if (codigo <= 1):\r\n self.inicio.set_nome(nome)\r\n if (codigo >= self.tamanho):\r\n self.fim.set_nome(nome)\r\n else:\r\n temp = self.inicio\r\n while(codigo != temp.get_codigo()):\r\n temp = temp.get_proximo()\r\n temp.set_nome(nome)\r\n return True\r\n\r\n def editaremail (self, codigo, email):\r\n if (self.is_empty()):\r\n return False\r\n if (codigo <= 1):\r\n self.inicio.set_email(email)\r\n if (codigo >= self.tamanho):\r\n self.fim.set_email(email)\r\n else:\r\n temp = self.inicio\r\n while(codigo != temp.get_codigo()):\r\n temp = temp.get_proximo()\r\n temp.set_email(email)\r\n return True\r\n\r\n def editartelefone (self, codigo, telefone):\r\n if (self.is_empty()):\r\n return False\r\n if (codigo <= 1):\r\n self.inicio.set_telefone(telefone)\r\n if (codigo >= self.tamanho):\r\n self.fim.set_telefone(telefone)\r\n else:\r\n temp = self.inicio\r\n while(codigo != temp.get_codigo()):\r\n temp = temp.get_proximo()\r\n temp.set_telefone(telefone)\r\n return True\r\n\r\n#Interesses\r\nclass interesses:\r\n def __init__(self):\r\n self.iniciointer = None\r\n self.fiminter = None\r\n self.proxcodinter = 1\r\n\r\n def InserirInteresse (self, inter):\r\n Novo = Interesse(inter)\r\n Novo.set_codigo(self.proxcodinter)\r\n if (self.proxcodinter == 1):\r\n self.iniciointer = self.fiminter = Novo #Inicio do primeiro interesse\r\n else:#Se não, inserir fim\r\n self.fim.set_proximo(Novo)\r\n Novo.set_anterior(self.fim)\r\n self.fim = Novo\r\n self.proxcodinter += 1\r\n\r\n\r\n def LimparNaoUsados (self):\r\n temp = self.iniciointer\r\n while (temp != None):\r\n if (temp.get_quantidadeint() == 0):\r\n if (temp.get_codigo()==1):\r\n self.iniciointer = temp.get_proximo()\r\n self.iniciointer.set_anterior(None)\r\n temp.set_proximo(None)\r\n temp = None\r\n if (temp.get_proximo != None):\r\n temp2 = temp.get_anterior()\r\n temp2.set_proximo(temp.get_proximo())\r\n temp2 = temp.get_proximo()\r\n temp2.set_anterior(temp.get_anterior())\r\n temp = None\r\n else:\r\n temp = self.fiminter.get_anterior()\r\n self.fiminter.set_anterior(None)\r\n temp.set_proximo(None)\r\n self.fiminter = temp\r\n self.proxcodinter -= 1\r\n self.organizarinteresses()\r\n temp = temp.get_proximo()\r\n\r\n\r\n def organizarinteresses (self):\r\n temp = self.iniciointer\r\n codigo = 1\r\n if (self.proxcodinter == 1):\r\n return False\r\n while (temp != None):\r\n temp.set_codigo(codigo)\r\n codigo += 1\r\n temp = temp.get_proximo()\r\n return True\r\n\r\n def exibirinteresses (self):\r\n if (self.is_empty()):\r\n print('Lista vazia')\r\n temp = self.inicio\r\n while(temp != None):\r\n self.printcontato(temp)\r\n temp = temp.get_proximo()\r\n\r\n" }, { "alpha_fraction": 0.61058109998703, "alphanum_fraction": 0.6149176359176636, "avg_line_length": 24.25, "blob_id": "ba7eb4bf3f7e5f9cbcf20fd1f46acd6e23d74acc", "content_id": "1dd2203048abc11b8ecd74b63518fff18a960b11", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1153, "license_type": "no_license", "max_line_length": 77, "num_lines": 44, "path": 
"/ProjetoAED (1)/Class/Interesse.py", "repo_name": "brunaeduarda/ProjetoAED", "src_encoding": "UTF-8", "text": "class Interesse: #Contato telefonico\r\n def __init__(self, inter):\r\n self.interesse = inter\r\n self.codigo = None\r\n self.proximo = None\r\n self.anterior = None\r\n self.quantidadeint = 0 #Quantidade de contatos que tem esse interesse\r\n\r\n#Gets\r\n def get_interesse(self):\r\n return self.interesse\r\n\r\n def get_proximo(self):\r\n return self.proximo\r\n\r\n def get_anterior(self):\r\n return self.anterior\r\n\r\n def get_codigo (self):\r\n return self.codigo\r\n\r\n def get_quantidadeint (self):\r\n return self.quantidadeint\r\n#Sets\r\n def set_interesse(self, inter):\r\n self.interesse = inter\r\n\r\n def set_proximo(self, proximo):\r\n self.proximo = proximo\r\n\r\n def set_anterior(self, anterior):\r\n self.anterior = anterior\r\n\r\n def set_codigo (self, codigo):\r\n self.codigo = codigo\r\n\r\n def set_quantidadeint (self, quandidade):\r\n self.quantidadeint = quandidade\r\n\r\n def add_quantidadeint (self):#ADD 1 na quantidade\r\n self.quantidadeint += 1\r\n\r\n def remove_quantidadeint (self): #Remove 1 na quantidade\r\n self.quantidadeint -= 1" }, { "alpha_fraction": 0.5615141987800598, "alphanum_fraction": 0.5652996897697449, "avg_line_length": 18.610389709472656, "blob_id": "a56b956169c6b6cfb6eafc0b8b39225ac0a7f150", "content_id": "d28ae59641910c5fb70a1bf54d1bfbb9c835eb40", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1585, "license_type": "no_license", "max_line_length": 76, "num_lines": 77, "path": "/ProjetoAED (1)/Class/contato.py", "repo_name": "brunaeduarda/ProjetoAED", "src_encoding": "UTF-8", "text": "class Contato: #Contato telefonico\r\n def __init__(self, codigo):\r\n self.codigo = codigo\r\n self.nome = None\r\n self.telefone = None\r\n self.email = None\r\n self.proximo = None\r\n self.anterior = None\r\n\r\n \"\"\" self.contatos Lista com os contatos / implantar Fase 2\"\"\"\r\n \"\"\" self.interesses Lista com os interesses / implantar Fase 3\"\"\"\r\n\r\n\r\n#GETS\r\n def get_codigo(self):\r\n return self.codigo\r\n\r\n def get_nome(self):\r\n return self.nome\r\n\r\n def get_telefone(self):\r\n return self.telefone\r\n\r\n def get_email(self):\r\n return self.email\r\n\r\n\r\n def get_proximo(self):\r\n return self.proximo\r\n\r\n def get_anterior(self):\r\n return self.anterior\r\n\r\n#SETS\r\n def set_codigo(self, codigo):\r\n self.codigo = codigo\r\n\r\n def set_nome(self, nome):\r\n self.nome = nome\r\n\r\n\r\n def set_telefone(self, telefone):\r\n self.telefone = telefone\r\n\r\n\r\n def set_email(self, email):\r\n self.email = email\r\n\r\n\r\n\r\n\r\n def set_proximo(self, contato):\r\n self.proximo = contato\r\n\r\n\r\n def set_anterior(self, contato):\r\n self.anterior = contato\r\n\r\n\"\"\" Implantar Fase 2\r\n def get_contatos(self):\r\n return self.contatos\r\n\"\"\"\r\n\r\n\"\"\" Implantar Fase 3\r\n def get_interesses(self):\r\n return self.interesses\r\n\"\"\"\r\n\r\n\"\"\" Implantar Fase 3\r\n def set_interesses(self, interesses):\r\n self.interesses = interesses\r\n\"\"\"\r\n\r\n\"\"\" Implantar Fase 2\r\n def set_contatos(self, contatos):\r\n self.contatos = contatos\r\n\"\"\"" } ]
5
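Note that procurarnome and procuraremail in BaseContatos.py above reference an undefined variable (indice); a minimal corrected sketch of that linear search over the linked contact list, standalone, with the node reduced to the fields the search actually uses:

# Corrected linear search corresponding to procurarnome/procuraremail above:
# walk the list until the requested field matches or the list ends. The
# undefined-"indice" guard in the original is unnecessary and is dropped.
class Node:
    def __init__(self, nome, email):
        self.nome, self.email, self.proximo = nome, email, None

def procurar(inicio, campo, valor):
    atual = inicio
    while atual is not None:
        if getattr(atual, campo) == valor:
            return atual
        atual = atual.proximo
    return False  # the original API returns False when nothing is found

a = Node("victor", "victor@email.com")
b = Node("teste", "teste@email.com")
a.proximo = b
assert procurar(a, "email", "teste@email.com") is b
assert procurar(a, "nome", "ninguem") is False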
LaloCo/PythonIntroduction
https://github.com/LaloCo/PythonIntroduction
4bd14af2f673033d484c14fb4268a9fa8ea9a8d9
3bc30b4434c5b7402ae2f6e3dceb3712e48c06c1
8dd96d90339ad11c50c3c57887ee6cd7eb189d76
refs/heads/master
2021-05-07T03:30:34.154941
2021-02-25T07:32:26
2021-02-25T07:32:26
110,925,455
0
2
null
null
null
null
null
[ { "alpha_fraction": 0.6850152611732483, "alphanum_fraction": 0.7155963182449341, "avg_line_length": 26.25, "blob_id": "22c3f282d5e14c44f2c08cdada949f47a42ece67", "content_id": "95f06e674b45b35ca0f0b60b0d706d7969f3fd82", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 327, "license_type": "no_license", "max_line_length": 66, "num_lines": 12, "path": "/break_time.py", "repo_name": "LaloCo/PythonIntroduction", "src_encoding": "UTF-8", "text": "import webbrowser\nimport time\n\nbreaks_to_take = 3\nbreaks_taken = 0\ntime_in_seconds_to_sleep = 60*60*2\n\nprint(\"Program started on \", time.ctime())\nwhile breaks_taken < breaks_to_take:\n time.sleep(time_in_seconds_to_sleep)\n webbrowser.open(\"https://www.youtube.com/watch?v=4MCjU-Du3eI\")\n breaks_taken = breaks_taken + 1\n" }, { "alpha_fraction": 0.6616102457046509, "alphanum_fraction": 0.6651108264923096, "avg_line_length": 33.15999984741211, "blob_id": "34f502775b851e6fdd1a0e012baa90587a86fdab", "content_id": "fd3be71b9bf62127386575604d346ddccfb50a0b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 857, "license_type": "no_license", "max_line_length": 113, "num_lines": 25, "path": "/check_profanity.py", "repo_name": "LaloCo/PythonIntroduction", "src_encoding": "UTF-8", "text": "import urllib.request\n\ndef read_text():\n quotes = open(r\"C:\\Users\\lalor\\Downloads\\some_file.txt\")\n contents_of_file = quotes.read()\n print(contents_of_file)\n quotes.close()\n check_profanity(contents_of_file)\n\ndef check_profanity(text_to_check):\n # Python 2: import urllib only\n # connection = urllib.urlopen(\"http://www.wdylike.appspot.com/?q=\" + text_to_check)\n # Python 3: import urllib.request\n connection = urllib.request.urlopen(\"http://www.wdylike.appspot.com/?q=\" + urllib.parse.quote(text_to_check))\n output = connection.read()\n output_string = output.decode(\"utf-8\")\n if \"true\" in output_string:\n print(\"Profanity Alert!\")\n elif \"false\" in output_string:\n print(\"Safe text, you can send it\")\n else:\n print(\"Could not scan the document properly\")\n connection.close()\n\nread_text() " }, { "alpha_fraction": 0.6304849982261658, "alphanum_fraction": 0.7274826765060425, "avg_line_length": 27.933332443237305, "blob_id": "832f67dc53ec27325253ea0a8a45bf95ab0b3a93", "content_id": "3b58f405bb82683e796123400f7f378d107a2dd2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 433, "license_type": "no_license", "max_line_length": 71, "num_lines": 15, "path": "/send_text.py", "repo_name": "LaloCo/PythonIntroduction", "src_encoding": "UTF-8", "text": "from twilio.rest import Client\n\n# Your Account SID from twilio.com/console\naccount_sid = \"AC03a3fac9c225f629f97bb3abe08f34e5\"\n# Your Auth Token from twilio.com/console\nauth_token = \"6208485a54865a2bffccd2eb00684458\"\n\nclient = Client(account_sid, auth_token)\n\nmessage = client.messages.create(\n to=\"+527711686391\", \n from_=\"+19492734551\",\n body=\"I want to know what interesting thing could I do with this!\")\n\nprint(\"SMS \", message.sid, \" sent.\")" }, { "alpha_fraction": 0.6602112650871277, "alphanum_fraction": 0.6848591566085815, "avg_line_length": 34.5625, "blob_id": "81dfb440773833f68982e902c5d7adc0b7f56636", "content_id": "fdeb0b09dc71f2d457819913946b5d0c5f4494ac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 568, "license_type": 
"no_license", "max_line_length": 94, "num_lines": 16, "path": "/rename_files.py", "repo_name": "LaloCo/PythonIntroduction", "src_encoding": "UTF-8", "text": "import os\nimport string\n\ndef rename_files():\n # 1 get file names from folder\n file_list = os.listdir(r\"C:\\Users\\lalor\\Downloads\\prank\\prank\") # r stands for raw\n os.chdir(r\"C:\\Users\\lalor\\Downloads\\prank\\prank\")\n print(file_list)\n # 2 rename files\n for file_name in file_list:\n # Python 2 os.rename(file_name, file_name.translate(None, \"0123456789\"))\n # Python 3\n translation = str.maketrans(string.ascii_letters, string.ascii_letters, string.digits)\n os.rename(file_name, file_name.translate(translation))\n\nrename_files()" }, { "alpha_fraction": 0.6759545803070068, "alphanum_fraction": 0.6790505647659302, "avg_line_length": 52.83333206176758, "blob_id": "666162f5c9a6d771195a1af9b40ba3f53dc3d978", "content_id": "80c02be01dc9bbeee24ee2e096304171b55d9b46", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 969, "license_type": "no_license", "max_line_length": 171, "num_lines": 18, "path": "/get_files_out.py", "repo_name": "LaloCo/PythonIntroduction", "src_encoding": "UTF-8", "text": "import os\nimport string\nimport shutil\n\ndef get_files_out():\n # 1 get file names from folder\n dir_list = [d for d in os.listdir(r\"/Users/eduardorosas/Downloads/fonts\") if os.path.isdir(os.path.join(r\"/Users/eduardorosas/Downloads/fonts\", d))] # r stands for raw\n os.chdir(r\"/Users/eduardorosas/Downloads/fonts\")\n # 2 rename files\n for directory in dir_list:\n onlyfiles = [f for f in os.listdir(directory) if os.path.isfile(os.path.join(directory, f))]\n for file_name in onlyfiles:\n if not os.path.exists(os.path.join(\"/Users/eduardorosas/Downloads/fonts/\" + directory, file_name)):\n shutil.move(os.path.join(\"/Users/eduardorosas/Downloads/fonts/\" + directory, file_name), \"/Users/eduardorosas/Downloads/fonts/all/\")\n else:\n shutil.move(os.path.join(\"/Users/eduardorosas/Downloads/fonts/\" + directory, file_name), \"/Users/eduardorosas/Downloads/fonts/all/\"+file_name+\"_2\")\n\nget_files_out()\n" }, { "alpha_fraction": 0.5951717495918274, "alphanum_fraction": 0.639739990234375, "avg_line_length": 18.25, "blob_id": "8e29e67757379250b93509aa64c513187f676c83", "content_id": "8d93782931ff46674d588f45a60411ffe3ab41fc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1077, "license_type": "no_license", "max_line_length": 31, "num_lines": 56, "path": "/mindstorms.py", "repo_name": "LaloCo/PythonIntroduction", "src_encoding": "UTF-8", "text": "import turtle\n\ndef draw_square(turtle):\n for i in range(0,4):\n turtle.forward(100)\n turtle.right(90)\n\ndef draw_circle(turtle):\n turtle.circle(50)\n\ndef draw_triangle(turtle):\n for i in range(0,3):\n turtle.forward(100)\n turtle.left(120)\n\ndef draw_art():\n window = turtle.Screen()\n window.bgcolor(\"#293275\")\n\n turtleOne = turtle.Turtle()\n turtleOne.shape(\"circle\")\n turtleOne.color(\"#00afb4\")\n turtleOne.speed(2)\n\n draw_square(turtleOne)\n\n turtleOne.shape(\"arrow\")\n turtleOne.color(\"#ff7301\")\n\n draw_circle(turtleOne)\n\n turtleOne.shape(\"turtle\")\n turtleOne.color(\"yellow\")\n turtleOne.left(180)\n\n draw_triangle(turtleOne)\n\n window.exitonclick()\n\ndef draw_circle_of_squares():\n window = turtle.Screen()\n window.bgcolor(\"#293275\")\n\n turtleOne = turtle.Turtle()\n turtleOne.shape(\"circle\")\n turtleOne.color(\"#00afb4\")\n 
turtleOne.speed(0)\n\n for i in range(0,72):\n draw_square(turtleOne)\n turtleOne.right(5)\n\n window.exitonclick()\n\ndraw_circle_of_squares()\ndraw_art()" }, { "alpha_fraction": 0.648204505443573, "alphanum_fraction": 0.6664637923240662, "avg_line_length": 32.551021575927734, "blob_id": "d980ea35b9a667c02531737cdba1ab7f61448165", "content_id": "d4db4a9c401d30f8a9eeaa5cb5bc11d44977acab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1643, "license_type": "no_license", "max_line_length": 98, "num_lines": 49, "path": "/stock price prediction/main.py", "repo_name": "LaloCo/PythonIntroduction", "src_encoding": "UTF-8", "text": "import csv, os\nimport numpy as np\nfrom sklearn.svm import SVR\nimport matplotlib.pyplot as plt\nfrom datetime import datetime\n\nopen_prices = []\nprices = []\n\ndef get_data(filename):\n os.chdir(\"stock price prediction\")\n with open(filename, 'r') as csvfile:\n csvFileReader = csv.reader(csvfile)\n next(csvFileReader)\n for row in csvFileReader:\n open_prices.append(float(row[1])) # features are: open price\n prices.append(float(row[4])) # values are: close price\n return\n\ndef predict_prices(open_prices, prices, x):\n open_prices = np.reshape(open_prices, (len(open_prices), 1))\n\n svr_lin = SVR(kernel='linear', C=1000.0)\n #svr_poly = SVR(kernel='poly', C=1000.0, degree=2)\n svr_rbf = SVR(kernel='rbf', C=1000.0, gamma=0.1)\n\n # Train\n svr_lin.fit(open_prices, prices)\n #svr_poly.fit(open_prices, prices)\n svr_rbf.fit(open_prices, prices)\n\n # Predict and Plot\n plt.scatter(open_prices, prices, color='black', label='Data')\n plt.plot(open_prices, svr_lin.predict(open_prices), color='red', label='Linear model')\n #plt.plot(open_prices, svr_poly.predict(open_prices), color='green', label='Polynomial model')\n plt.plot(open_prices, svr_rbf.predict(open_prices), color='blue', label='RBF model')\n plt.xlabel('Date')\n plt.ylabel('Price')\n plt.title('Support Vector Regression')\n plt.legend()\n plt.show()\n\n #return svr_lin.predict(x)[0], svr_poly.predict(x)[0], svr_rbf.predict(x)[0]\n return svr_lin.predict(x)[0], svr_rbf.predict(x)[0]\n\nget_data('TSLA.csv')\npredicted_prices = predict_prices(open_prices, prices, 300.0)\n\nprint(predicted_prices)" } ]
7
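rename_files.py above strips digits from filenames; a minimal sketch isolating the Python 3 translate idiom it relies on (no filesystem access, so it is safe to run anywhere; the sample names are assumptions):

# The maketrans(x, y, deletechars) form used in rename_files.py above: the
# third argument lists characters to delete during translate(). Mapping
# "" to "" with string.digits as deletechars removes every digit, which is
# equivalent to the letters-to-letters mapping the repository uses.
import string

strip_digits = str.maketrans("", "", string.digits)

for name in ["h4ck3r.jpg", "ch1ck3n.png"]:
    print(name, "->", name.translate(strip_digits))
# h4ck3r.jpg -> hckr.jpg
# ch1ck3n.png -> chckn.png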
madriss/Dermoscopy-CNN
https://github.com/madriss/Dermoscopy-CNN
59e5180a4bfe9b5229585e50fcacb85d41c9f7bd
1e7c0a244037d2f379d192e2f6cb6e3555d0512c
c87c0244f93153a6cfc340edb0f927018ba003fe
refs/heads/master
2020-06-26T00:08:48.571454
2019-07-29T13:59:44
2019-07-29T13:59:44
199,463,464
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8079625368118286, "alphanum_fraction": 0.8079625368118286, "avg_line_length": 29.5, "blob_id": "f2565e862dcca183df709b73183cafbea2f41f89", "content_id": "96023ec07830e16c1fb9504eecdcb00aa442bc85", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 429, "license_type": "no_license", "max_line_length": 88, "num_lines": 14, "path": "/README.md", "repo_name": "madriss/Dermoscopy-CNN", "src_encoding": "UTF-8", "text": "# Skin lesion identification based on dermoscopy images\n\n# Goal\nSubmit automated predictions of disease classification within dermoscopic images.\n\n# Possible disease categories are:\n\n- Melanoma\n- Melanocytic nevus\n- Basal cell carcinoma\n- Actinic keratosis / Bowen’s disease (intraepithelial carcinoma)\n- Benign keratosis (solar lentigo / seborrheic keratosis / lichen planus-like keratosis)\n- Dermatofibroma\n- Vascular lesion\n" }, { "alpha_fraction": 0.6373980641365051, "alphanum_fraction": 0.6535179018974304, "avg_line_length": 32.58598709106445, "blob_id": "2fc1d62cbd436717742463a22a016d57062be9ad", "content_id": "7f92f76389b849b506103421d530c1aa95d6c63f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5273, "license_type": "no_license", "max_line_length": 129, "num_lines": 157, "path": "/app.py", "repo_name": "madriss/Dermoscopy-CNN", "src_encoding": "UTF-8", "text": "# coding=utf-8\nimport sys\nimport os\nimport glob\nimport re\nimport numpy as np\nimport scipy as sp\n\n#keras\nimport keras\nfrom keras.layers import Dense,GlobalAveragePooling2D\nfrom keras.applications import MobileNetV2\nfrom keras.preprocessing import image\nfrom keras.applications.mobilenetv2 import preprocess_input\nfrom keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img\nfrom keras.models import Model, load_model\nfrom keras.optimizers import Adam\nfrom keras import backend as K\nimport itertools\n\n#matplotlib\nimport matplotlib.pyplot as plt\n\n# Flask utils\nfrom flask import Flask, redirect, url_for, request, render_template\nfrom werkzeug.utils import secure_filename\nfrom gevent.pywsgi import WSGIServer\n\n#prediction dictionary\npred_dict = {0: 'Actinic keratoses',\n 1: 'Basal cell carcinoma',\n 2: 'Benign keratosis-like lesions ',\n 3: 'Dermatofibroma',\n 4: 'Melanocytic nevi',\n 5: 'Melanoma',\n 6: 'Vascular lesions'}\n\nget_label = lambda lst: np.array([pred_dict[x] for x in lst])\n\n# Define a flask app\napp = Flask(__name__)\n\n# Model saved with Keras model.save()\nMODEL_PATH = 'models/HAM1000_best_model.hdf5'\n\n# Load your trained model\nmodel = load_model(MODEL_PATH)\nmodel._make_predict_function()\nprint('Model loaded. 
Check http://127.0.0.1:5000/')\n\n\ndef model_predict(img_path, model):\n img = image.load_img(img_path, target_size=(224, 224))\n\n # Preprocessing the image\n x = image.img_to_array(img)\n # x = np.true_divide(x, 255)\n x = np.expand_dims(x, axis=0)\n\n # Be careful how your trained model deals with the input\n # otherwise, it won't make correct prediction!\n x = preprocess_input(x, mode='caffe')\n\n preds = model.predict(x)\n return preds\n\ndef get_last_conv_model(model):\n last_conv_layer_name = [layer for layer in model.layers if \"conv\" in layer.name.lower()][-1].name\n print(f\"Found last conv layer to be: {last_conv_layer_name}\")\n last_conv_model = Model(model.input, model.get_layer(last_conv_layer_name).output)\n return last_conv_model\n\ndef get_cam(img, model, last_conv_model):\n img = img[np.newaxis,:,:]\n #Obtaining class_weights\n gap_layers_inds = np.argwhere([True if \"global_average_pooling2d\" in layer.name.lower() else False for layer in model.layers ])\n gap_layer = model.layers[gap_layers_inds.flatten()[-1] + 1]\n gap_layer_weights = gap_layer.get_weights()[0]\n pred_probas = model.predict(img)\n pred_class = np.argmax(pred_probas.flatten())\n pred_class_proba = pred_probas.flatten()[pred_class]\n class_weights = gap_layer_weights[:,pred_class]\n\n #Extracting last conv layer\n conv_out = last_conv_model.predict(img).squeeze()\n h, w = img.shape[1]/conv_out.shape[0], img.shape[2]/conv_out.shape[1]\n\n conv_out = sp.ndimage.zoom(conv_out, (h, w, 1), order=1)\n\n return np.dot(conv_out, class_weights), pred_class, pred_class_proba\n\nlast_conv_model = get_last_conv_model(model)\nlast_conv_model._make_predict_function()\n\n# make a prediction using test-time augmentation\ndef tta_prediction(datagen, model, x, steps=5):\n # prepare iterator\n\n it = datagen.flow(x=x,\n batch_size=1,\n shuffle=False)\n predictions = []\n for i in range(steps):\n # make predictions for each augmented image\n yhats = model.predict_generator(it, steps=it.n//it.batch_size, verbose=0)\n predictions.append(yhats)\n pred = np.mean(predictions, axis=0)\n return np.argmax(pred, axis=-1), np.max(pred)\n\n\ntta_datagen = ImageDataGenerator(\n preprocessing_function=preprocess_input,\n rotation_range=7,\n width_shift_range=0.1,\n height_shift_range=0.1,\n shear_range=0.1,\n zoom_range=0.1,\n horizontal_flip=True,\n vertical_flip=True,\n fill_mode='nearest')\n\n@app.route('/', methods=['GET'])\ndef index():\n # Main page\n return render_template('index.html')\n\n@app.route('/predict', methods=['GET', 'POST'])\ndef upload():\n if request.method == 'POST':\n # Get the file from post request\n f = request.files['file']\n\n # Save the file to ./uploads\n basepath = os.path.dirname(__file__)\n file_path = os.path.join(\n basepath, 'uploads', secure_filename(f.filename))\n f.save(file_path)\n\n # Make prediction\n x_test = load_img(file_path, target_size=(224, 224))\n x_test = img_to_array(x_test) # this is a Numpy array with shape (3, 224, 224)\n x_test = preprocess_input(x_test)\n # pred_class = tta_prediction(tta_datagen, model, x_test[np.newaxis,:,:])\n cam, pred_class, pred_class_proba = get_cam(x_test, model, last_conv_model)\n pred_class, pred_class_proba = tta_prediction(tta_datagen, model, x_test[np.newaxis,:,:], steps=5)\n f, ax = plt.subplots(1, 1)\n ax.imshow(x_test)\n ax.imshow(cam, cmap='jet', alpha=0.3)\n cam_file_path = file_path.split('.')[0]+\"_cam.png\"\n f.savefig(cam_file_path)\n result = pred_dict[pred_class[0]] # Convert to string\n return f\"{result} with 
{pred_class_proba*100:.4f}% probability\"\n return None\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n" } ]
2
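A minimal NumPy sketch of the class-activation-map arithmetic that get_cam() in app.py above performs — the last conv layer's feature maps weighted by the GAP-to-dense weights of the predicted class. Shapes are MobileNetV2-like assumptions chosen only for illustration:

# Core of get_cam() above: CAM[h, w] = sum_k conv_out[h, w, k] * w_class[k],
# i.e. a dot product of the feature-map stack with the class's GAP weights.
import numpy as np

h, w, channels, classes = 7, 7, 1280, 7          # assumed shapes
conv_out = np.random.rand(h, w, channels)        # last conv layer activations
gap_weights = np.random.rand(channels, classes)  # dense weights after GAP

pred_class = 4                                   # pretend the model chose class 4
cam = conv_out @ gap_weights[:, pred_class]      # same as np.dot in the repo code

print(cam.shape)   # (7, 7): one heat value per spatial location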
FAS-Server/LiveBotController
https://github.com/FAS-Server/LiveBotController
e63a3e3e4c61631f5a9cbd2c100a0c5879e2fd60
6cd1e50a3d7105838655027a0ee17b826241c017
97e2541fe6cb2d0b82781082851238226422f70b
refs/heads/main
2023-07-21T23:41:44.645253
2021-08-14T03:23:22
2021-08-14T03:23:22
390,574,074
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5803009271621704, "alphanum_fraction": 0.5839747786521912, "avg_line_length": 33.06748580932617, "blob_id": "782ce132ff66b7fcee8d372ca58131b49a916e0e", "content_id": "81a0c5dcc315a9bdf5e4c9632a1add8d9dcf3a07", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11462, "license_type": "permissive", "max_line_length": 114, "num_lines": 326, "path": "/LiveBotController.py", "repo_name": "FAS-Server/LiveBotController", "src_encoding": "UTF-8", "text": "import enum\r\nimport os\r\nimport random\r\nimport re\r\nimport time\r\nfrom typing import Optional\r\n\r\nimport yaml\r\nfrom mcdreforged.api.all import *\r\n\r\nPLUGIN_METADATA = {\r\n 'id': 'livebot_controller',\r\n 'version': '0.2.2',\r\n 'name': 'LiveBotController',\r\n 'description': \"A MCDR plugin for controlling livebot\",\r\n 'author': ['Youmiel','YehowahLiu'],\r\n 'link': 'https://github.com/FAS-Server/LiveBotController',\r\n 'dependencies': {\r\n 'mcdreforged': '>=1.0.0',\r\n }\r\n}\r\n\r\nSTATE_PATTERN = re.compile(r'Bot state: (Normal|Offline|Spectating [\\w]{3,16})')\r\n# Botstate: Offline | Normal | Spectating <player>\r\nLIST_PATTERN = re.compile(r'There are ([0-9]+) of a max of ([0-9]+) players online:\\s?(.*)')\r\n# group(1): player number\r\n# group(2): max players\r\n# group(3): player list\r\nCONFIG_PATH = os.path.join('config', 'LiveBotController.yml')\r\nLIVEBOT_CONFIG = os.path.join('server', 'LiveBotFabric', 'config.json')\r\nLANDSCAPE_PATH = os.path.join('config', 'LiveBotLandscape.txt')\r\n\r\nPREFIX = \"!!live\"\r\n\r\ndefault_config = {\r\n 'randomTpDelay': 30,\r\n 'excludedPrefix': '',\r\n 'excludedSuffix': '',\r\n}\r\nconfig = default_config.copy()\r\n\r\n\r\n# -------------------------------------------\r\nclass PlayerStack:\r\n players: list\r\n\r\n def __init__(self) -> None:\r\n self.players = []\r\n\r\n def push(self, player: str):\r\n if player in self.players:\r\n self.players.remove(player)\r\n self.players.append(player)\r\n\r\n def pop(self) -> Optional[str]:\r\n if len(self.players) > 0:\r\n player = self.players[-1]\r\n self.players.remove(player)\r\n return player\r\n else:\r\n return None\r\n\r\n def top(self):\r\n if len(self.players) > 0:\r\n return self.players[-1]\r\n else:\r\n return None\r\n\r\n def size(self):\r\n return len(self.players)\r\n\r\nclass LiveBotController:\r\n class Mode(enum.Enum):\r\n EMPTY = 'EMPTY'\r\n OCCUPIED = 'OCCUPIED'\r\n RANDOM = 'RANDOM'\r\n\r\n def __init__(self) -> None:\r\n self.online = False\r\n self.running = False\r\n self.mode = LiveBotController.Mode.EMPTY\r\n self.occupied_players = PlayerStack()\r\n self.time_since_last_tp = time.time()\r\n\r\n def start(self) -> None:\r\n self.running = True\r\n cast('bot_start')\r\n self.tick()\r\n\r\n @new_thread('LiveBotController')\r\n def tick(self):\r\n while self.running:\r\n if self.online:\r\n if self.occupied_players.size() == 0 and self.mode != LiveBotController.Mode.RANDOM:\r\n self.mode = LiveBotController.Mode.RANDOM\r\n if self.occupied_players.size() > 0 and self.mode != LiveBotController.Mode.OCCUPIED:\r\n self.mode = LiveBotController.Mode.OCCUPIED\r\n {\r\n LiveBotController.Mode.EMPTY: self.do_empty,\r\n LiveBotController.Mode.OCCUPIED: self.do_occupied,\r\n LiveBotController.Mode.RANDOM: self.do_random,\r\n }[self.mode]()\r\n time.sleep(1)\r\n cast('bot_stop')\r\n\r\n def do_empty(self): # really empty :)\r\n pass\r\n\r\n def do_occupied(self):\r\n global plugin_fields\r\n if self.occupied_players.top() not in 
plugin_fields.player_list:\r\n self.occupied_players.pop()\r\n if self.occupied_players.size() != 0:\r\n plugin_fields.server.rcon_query(\"botfollow %s\" % self.occupied_players.top())\r\n\r\n def do_random(self):\r\n global plugin_fields, config\r\n if (time.time() - self.time_since_last_tp) < config['randomTpDelay']:\r\n return\r\n self.time_since_last_tp = time.time()\r\n if self.online and len(plugin_fields.player_list) <= 1:\r\n if len(plugin_fields.landscapes) > 0:\r\n index = random.randint(0, len(plugin_fields.landscapes) - 1)\r\n plugin_fields.server.rcon_query(plugin_fields.landscapes[index])\r\n elif self.online:\r\n '''\r\n pattern = plugin_fields.player_pattern\r\n while(len(plugin_fields.player_list) > 1):\r\n index = random.randint(0, len(plugin_fields.player_list) - 1)\r\n player = plugin_fields.player_list[index]\r\n if re.fullmatch(pattern, player) is None: \r\n break\r\n # old logic\r\n '''\r\n index = random.randint(0, len(plugin_fields.player_list) - 1)\r\n player = plugin_fields.player_list[index]\r\n plugin_fields.server.rcon_query(\"botfollow %s\" % player)\r\n\r\n def add_occupation(self, player: str):\r\n if self.online and self.running:\r\n self.occupied_players.push(player)\r\n plugin_fields.server.rcon_query('botfollow %s' % player)\r\n plugin_fields.server.broadcast('玩家 %s 临时获得了直播视角的控制权' % player)\r\n\r\n def copy(self):\r\n bot = LiveBotController()\r\n bot.mode = self.mode\r\n bot.occupied_players = self.occupied_players\r\n bot.online = self.online\r\n bot.running = self.running\r\n bot.time_since_last_tp = self.time_since_last_tp\r\n return bot\r\n\r\n\r\n# -------------------------------------------\r\nclass Fields:\r\n def __init__(self) -> None:\r\n self.server = None\r\n self.bot = LiveBotController()\r\n #self.player_num = 0\r\n self.player_list = []\r\n #self.landscape_num = 0\r\n self.landscapes = []\r\n self.player_pattern = None\r\n\r\n\r\nplugin_fields = Fields()\r\n\r\n\r\n# -------------------------------------------\r\ndef load_config(server: ServerInterface):\r\n global config\r\n try:\r\n config = {}\r\n with open(CONFIG_PATH) as file:\r\n conf_yaml = yaml.load(file, Loader=yaml.Loader) # idk why CLoader doesn't work\r\n for key in default_config.keys():\r\n config[key] = conf_yaml[key]\r\n server.logger.info('Config file loaded')\r\n except Exception as e:\r\n server.logger.warning('fail to read config file: %s, using default config' % e)\r\n config = default_config.copy()\r\n with open(CONFIG_PATH, 'w') as file:\r\n yaml.dump(default_config, file)\r\n\r\n\r\ndef load_landscape(server: ServerInterface):\r\n global plugin_fields\r\n try:\r\n with open(LANDSCAPE_PATH, 'r') as file:\r\n plugin_fields.landscapes = []\r\n for line in file:\r\n #plugin_fields.landscapes.append(str.removesuffix(line, '\\n'))\r\n plugin_fields.landscapes.append(line.replace('\\n', ''))\r\n server.logger.info('Landscape file loaded')\r\n except FileNotFoundError as e:\r\n server.logger.warning('fail to read landscape file: %s, creating it automatically.' 
% e)\r\n with open(LANDSCAPE_PATH, 'w') as file:\r\n pass\r\n\r\n\r\ndef build_command(server: ServerInterface):\r\n # register help message\r\n server.register_help_message(PREFIX, \"Control the livebot\")\r\n node = Literal(PREFIX).runs(occupy)\r\n server.register_command(node)\r\n server.register_command(Literal('!!test').requires(lambda src: src.has_permission(3)).runs(peek))\r\n\r\n\r\n@new_thread('LiveBotController_checkRcon')\r\ndef check_rcon():\r\n global plugin_fields\r\n time.sleep(1)\r\n # plugin_fields.server.logger.info('testing RCON...\\n')\r\n if plugin_fields.server.is_server_startup() and not plugin_fields.server.is_rcon_running():\r\n cast('no_rcon')\r\n plugin_fields.server.unload_plugin(PLUGIN_METADATA['id'])\r\n\r\n\r\n@new_thread('UpdatePlayer')\r\ndef update_player_list(server: ServerInterface):\r\n global plugin_fields\r\n query = server.rcon_query('list')\r\n match = re.match(LIST_PATTERN, query)\r\n if match:\r\n plugin_fields.player_list = re.split(',\\s', match.group(3))\r\n for player in plugin_fields.player_list:\r\n if plugin_fields.player_pattern is None:\r\n break\r\n if re.fullmatch(plugin_fields.player_pattern, player.lower()) is not None:\r\n plugin_fields.server.logger.info('remove %s' % player)\r\n plugin_fields.player_list.remove(player)\r\n\r\n\r\n@new_thread('UpdatePlayer')\r\ndef update_bot_state(server: ServerInterface):\r\n global plugin_fields\r\n query = server.rcon_query('botstate')\r\n match = re.match(STATE_PATTERN, query)\r\n if match:\r\n if plugin_fields.bot.online and match.group(1) == 'Offline':\r\n plugin_fields.bot.online = False\r\n elif not (plugin_fields.bot.online or match.group(1) == 'Offline'):\r\n plugin_fields.bot.online = True\r\n\r\n\r\ndef occupy(cmd_src: CommandSource):\r\n global plugin_fields\r\n if cmd_src.is_player:\r\n plugin_fields.bot.add_occupation(cmd_src.player)\r\n else:\r\n cast('console_warning')\r\n\r\n\r\ndef cast(event: str):\r\n global plugin_fields\r\n server = plugin_fields.server\r\n {\r\n 'bot_start': lambda: server.logger.info('Bot started.'),\r\n 'bot_stop': lambda: server.logger.info('Bot stopped.'),\r\n 'console_warning': lambda: server.logger.warning('Console command is not supported.'),\r\n 'no_rcon': lambda: server.logger.warning('RCON is not enabled, unloading plugin.'),\r\n 'thing': lambda: server.logger.info('something\\n')\r\n }[event]()\r\n\r\n\r\ndef peek(cmd_src: CommandSource):\r\n cmd_src.reply('plugin_fields:' + plugin_fields.player_list.__str__() + '_%d' % len(plugin_fields.player_list))\r\n cmd_src.reply('landscape:' + plugin_fields.landscapes.__str__() + '_%d' % len(plugin_fields.landscapes))\r\n cmd_src.reply('bot: mode: ' + plugin_fields.bot.mode.__str__() +\r\n ', running: ' + plugin_fields.bot.running.__str__() +\r\n ', online: ' + plugin_fields.bot.online.__str__() +\r\n ', list: ' + plugin_fields.bot.occupied_players.players.__str__() +\r\n ', count: ' + plugin_fields.bot.occupied_players.size().__str__())\r\n cmd_src.reply('config: ' + config.__str__())\r\n pass\r\n\r\n\r\n# -------------------------------------------\r\n\r\ndef on_load(server: ServerInterface, old_module):\r\n global plugin_fields, config\r\n if old_module is not None:\r\n plugin_fields = old_module.plugin_fields\r\n plugin_fields.bot = old_module.plugin_fields.bot.copy()\r\n plugin_fields.server = server\r\n load_config(server)\r\n load_landscape(server)\r\n check_rcon()\r\n if config['excludedPrefix'] != '' or config['excludedSuffix'] != '':\r\n plugin_fields.player_pattern = re.compile(\r\n r'(' + 
config['excludedPrefix'].lower() + r')' +\r\n r'\\w+' +\r\n r'(' + config['excludedSuffix'].lower() + r')'\r\n )\r\n else:\r\n plugin_fields.player_pattern = None\r\n build_command(server)\r\n if server.is_server_startup():\r\n plugin_fields.bot.start()\r\n\r\n\r\ndef on_unload(server: ServerInterface):\r\n global plugin_fields\r\n plugin_fields.bot.running = False\r\n\r\n\r\ndef on_server_stop(server: ServerInterface, code: int):\r\n global plugin_fields\r\n plugin_fields.bot.running = False\r\n\r\n\r\ndef on_server_startup(server: ServerInterface):\r\n global plugin_fields\r\n check_rcon()\r\n plugin_fields.bot.start()\r\n\r\n\r\ndef on_player_left(server: ServerInterface, player):\r\n update_player_list(server)\r\n update_bot_state(server)\r\n\r\n\r\ndef on_player_joined(server: ServerInterface, player: str, info: Info):\r\n update_player_list(server)\r\n update_bot_state(server)\r\n" }, { "alpha_fraction": 0.7583892345428467, "alphanum_fraction": 0.7583892345428467, "avg_line_length": 28.799999237060547, "blob_id": "07dab3e4449812cd239151b14fcb0fcd8b71f869", "content_id": "6ae6aa2edc4d7c153078bde8bfef80498841f42a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 149, "license_type": "permissive", "max_line_length": 62, "num_lines": 5, "path": "/README.md", "repo_name": "FAS-Server/LiveBotController", "src_encoding": "UTF-8", "text": "# LiveBotController\nA MCDR plugin for controlling livebot, requires LivebotFabric.\n\n#### Commands\n- **!!live** to gain temporary control of livebot.\n" } ]
2
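Editor's note on the LiveBotController record above: update_player_list matches RCON 'list' replies against a module-level LIST_PATTERN that falls outside this excerpt, then drops players whose names fit the configured excludedPrefix/excludedSuffix pattern. Below is a minimal self-contained sketch of the same parse-and-filter idea; the reply format and both regexes are my assumptions, not the plugin's actual constants.

import re

# Hypothetical stand-in for the plugin's LIST_PATTERN; a vanilla reply looks like
# 'There are 2 of a max of 20 players online: Alice, camera_spawn'
LIST_PATTERN = re.compile(r'There are (\d+) of a max(?: of)? (\d+) players online:\s*(.*)')


def parse_player_list(reply, excluded=None):
    """Return the players named in an RCON 'list' reply, dropping excluded names."""
    match = LIST_PATTERN.match(reply)
    if match is None or match.group(3) == '':
        return []
    players = re.split(r',\s*', match.group(3))
    if excluded is None:
        return players
    # mirrors the plugin's prefix + \w+ + suffix exclusion built from its config
    return [p for p in players if re.fullmatch(excluded, p.lower()) is None]


print(parse_player_list(
    'There are 2 of a max of 20 players online: Alice, camera_spawn',
    excluded=re.compile(r'(camera_)\w+()')))  # -> ['Alice']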
NurErtem/Fall2016Swe573NurErtem
https://github.com/NurErtem/Fall2016Swe573NurErtem
06b84b8799d64ebf82aae06de6747997a8026e59
819e842874b388921a4703be9b946567f30399bb
bc5d264e495798124652da6a3d22db6674b93145
refs/heads/master
2021-01-12T15:07:37.174848
2016-12-27T12:15:07
2016-12-27T12:15:07
69,350,182
1
0
null
2016-09-27T11:34:26
2016-12-13T12:33:01
2016-12-16T16:39:22
JavaScript
[ { "alpha_fraction": 0.591289758682251, "alphanum_fraction": 0.6013400554656982, "avg_line_length": 20.285715103149414, "blob_id": "32fb69beb0167305ae916442a683f3142bc26265", "content_id": "b174e5e945fc4ff3bf9cf4531167e8dee7bcac2e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 597, "license_type": "permissive", "max_line_length": 59, "num_lines": 28, "path": "/server1/engine/core/menu.py", "repo_name": "NurErtem/Fall2016Swe573NurErtem", "src_encoding": "UTF-8", "text": "\ndef get_menu_items(active_menu='Dashboard'):\n\tmenu_items = []\n\n\tmenu1 = {\n\t\t'active': 'active' if active_menu == 'Dashboard' else '',\n\t\t'class': 'ti-panel',\n\t\t'name': 'Dashboard',\n\t\t'link': '/core/dashboard'\n\t}\n\tmenu_items.append(menu1)\n\n\tmenu2 = {\n\t\t'active': 'active' if active_menu == 'Timeline' else '',\n\t\t'class': 'ti-text',\n\t\t'name': 'Timeline',\n\t\t'link': '/core/timeline'\n\t}\n\tmenu_items.append(menu2)\n\n\tmenu3 = {\n\t\t'active': 'active' if active_menu == 'Profile' else '',\n\t\t'class': 'ti-user',\n\t\t'name': 'Profile',\n\t\t'link': '/core/profile'\n\t}\n\tmenu_items.append(menu3)\n\n\treturn menu_items\n" }, { "alpha_fraction": 0.7414330244064331, "alphanum_fraction": 0.7414330244064331, "avg_line_length": 19.0625, "blob_id": "4786b7a21d45bc4f6b1d7da222453329e1b4069f", "content_id": "c0bac69a4d6d932d10be121ea91b30457e216337", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 321, "license_type": "permissive", "max_line_length": 51, "num_lines": 16, "path": "/server1/engine/core/forms.py", "repo_name": "NurErtem/Fall2016Swe573NurErtem", "src_encoding": "UTF-8", "text": "'''\nimport floppyforms as forms\n'''\nfrom django import forms\nimport datetime\nfrom core.models import activitiesMade, UserProfile\n\nclass activitiesMade(forms.ModelForm):\n\tclass Meta:\n\t\tmodel = activitiesMade\n\t\tfields = '__all__'\n\nclass saveProfile(forms.ModelForm):\n\tclass Meta:\n\t\tmodel = UserProfile\n\t\tfields = '__all__'\n" }, { "alpha_fraction": 0.4406392574310303, "alphanum_fraction": 0.4406392574310303, "avg_line_length": 24.823530197143555, "blob_id": "b72f3e1fa63dfcbdbf7385d1931552e302661755", "content_id": "52555482c15092eab9fbfe6c77be699d68053698", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 438, "license_type": "permissive", "max_line_length": 78, "num_lines": 17, "path": "/server1/engine/static/old/food/list_food.html", "repo_name": "NurErtem/Fall2016Swe573NurErtem", "src_encoding": "UTF-8", "text": "{% extends \"template.html\" %}\n\n{% block content %}\n {% if foods %}\n <div class=\"row\">\n <ul>\n {% for food in foods %}\n <span class=\"glyphicon glyphicon-circle-arrow-right\"></span>\n <a href=\"{% url 'food:get-food' food.ndbno %}\">{{ food.name }}</a>\n <br/>\n {% endfor %}\n </ul>\n </div>\n {% else %}\n <p>No foods found.</p>\n {% endif %}\n{% endblock %}" }, { "alpha_fraction": 0.5476838946342468, "alphanum_fraction": 0.5504087209701538, "avg_line_length": 25.285715103149414, "blob_id": "918e6550ba71262a4fceacdfd9a53812a2e49552", "content_id": "7e6750e2ec583f577e4f538a4f4aad4771b6de58", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 367, "license_type": "permissive", "max_line_length": 55, "num_lines": 14, "path": "/server1/engine/static/old/home.js", "repo_name": "NurErtem/Fall2016Swe573NurErtem", 
"src_encoding": "UTF-8", "text": "function search_foods(callback) {\n\t$.ajax({\n\t type: 'GET',\n\t url: '/energy/food/?name=' + $('#txtName').val(),\n\t dataType: 'json',\n contentType: 'application/json; charset=utf-8',\n\t success: function(response) {\n\t $('#lblData').html(JSON.stringify(response));\n\t },\n\t error: function(error) {\n\t console.log(error);\n\t }\n\t });\n}" }, { "alpha_fraction": 0.48672565817832947, "alphanum_fraction": 0.5280236005783081, "avg_line_length": 15.899999618530273, "blob_id": "91c2b7661f38161a71aa6165e6fcbec81a93b0d0", "content_id": "f5e653394404c41903c2ae5583f5398dc0f25782", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 678, "license_type": "permissive", "max_line_length": 23, "num_lines": 40, "path": "/server1/engine/core/data.py", "repo_name": "NurErtem/Fall2016Swe573NurErtem", "src_encoding": "UTF-8", "text": "\n\ndef get_cards():\n\tcards = []\n\n\tcard1 = {\n\t\t'title': 'Weight',\n\t\t'value': '81',\n\t\t'unit': 'kg',\n\t\t'comment': 'today',\n\t\t'icon1': 'ti-server',\n\t\t'icon2': 'ti-reload'\n\t}\n\tcards.append(card1)\n\tcard2 = {\n\t\t'title': 'BMI',\n\t\t'value': '24.5',\n\t\t'unit': '',\n\t\t'comment': 'today',\n\t\t'icon1': 'ti-pulse',\n\t\t'icon2': 'ti-reload'\n\t}\n\tcards.append(card2)\n\tcard3 = {\n\t\t'title': 'Intake',\n\t\t'value': '2250',\n\t\t'unit': 'cal',\n\t\t'comment': 'today',\n\t\t'icon1': 'ti-pulse',\n\t\t'icon2': 'ti-reload'\n\t}\n\tcards.append(card3)\n\tcard4 = {\n\t\t'title': 'Outtake',\n\t\t'value': '750',\n\t\t'unit': 'cal',\n\t\t'comment': 'today',\n\t\t'icon1': 'ti-pulse',\n\t\t'icon2': 'ti-reload'\n\t}\n\tcards.append(card4)\n\treturn cards\n" }, { "alpha_fraction": 0.7635782957077026, "alphanum_fraction": 0.7837060689926147, "avg_line_length": 42.44444274902344, "blob_id": "0a56179a05e22a0a368f27f9a3f3b42aa367578f", "content_id": "feaf62b4d7d8dd6803e877755719026120740847", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3130, "license_type": "permissive", "max_line_length": 556, "num_lines": 72, "path": "/README.md", "repo_name": "NurErtem/Fall2016Swe573NurErtem", "src_encoding": "UTF-8", "text": "\n#\"Karma Circle\": a Software Engineering course project \n \n\n Abstract:\n\n This is a project repository created to record all the documentation and code necessary to deliver the web application required to be developed and deployed as part of a full stack development course being taught at Bogazici University.\n\n The web app is simply designed to track calories gained and calories spent during the day. The user will be able track and monitor his progress daily, monthly and annually. The app will provide guidance, more like a refernce point, and help the user to observe his behaviour in order to try to have a healthy outlook. The application also incorporates a daily emotional rating scale. The mood tracking ability tries to bring a comprehensive view to the overall health of the individual, and distinguishes the application by deisgn from its counterparts. 
\n \n* [PROJECT PLAN GUIDELINES](https://github.com/NurErtem/Fall2016Swe573NurErtem/wiki/01-project-plan-guidelines) \n* [REQUIREMENTS DOCUMENT](https://github.com/NurErtem/Fall2016Swe573NurErtem/wiki/02-Requirements-Document) \n* [SOFTWARE DESIGN DOCUMENT](https://github.com/NurErtem/Fall2016Swe573NurErtem/wiki/03-Software-Design-Document) \n* [UI DESIGN](https://github.com/NurErtem/Fall2016Swe573NurErtem/wiki/04-Draft-UI-design-and-wireframe-mock-ups) \n* [RESEARCH BIBLIOGRAPHY](https://github.com/NurErtem/Fall2016Swe573NurErtem/wiki/05-research-bibliography-and-references) \n\n### The below technologies and tools are used in making of this web app: \n\n* Python 3: the language of the Django framework\n\n* Django 1.10.4: web framework\n\n* SQLite: default DB of Django is used \n\n* Zen Hub: time management tool\n\n* MS Project 2016: Project management tool \n\n* GitHUb tags, issues utilized as tasks, milestones in line with the project plan \n\n* DrawIO: mockups and SDD diagrams\n\n* PyCharm: IDE for Django environment back end develeopment\n\n* WebStorm: IDE for HTML, Javascript,CSS, ajax etc front end development\n\n* Postman: RESTful API browser\n\n* GitHub Desktop: to push and pull local dev files to github \n\n* PIP 9.0.1: pyhton package index \n\n* GIT: command line interface for version control \n\n* PuttyGen SSH key generator utility: AWS pem to pkk conversion on windows\n\n* Putty: connecting to the AWS web server \n\n* WinSCP: synch files between AWS and local \n\n* AWS web interface: deployment environment: EC2, VPC, IAM, CODEDEPLOY\n\n* AWS Command Line Interface \n\n* Under AWS: Ubuntu 16.04\n\n* Under AWS: Apache2 \n\n* Docker: to be used for clean deployment environment\n\n\n \n### The repository contains the clarification and research on the following issues:\n\n* research on the things assigned in homeworks as requirements\n* utterly very important to divide the assigned requirements into tasks and track tasks\n* record tasks as issues with estimated time, and actual time spent\n* learn about github and features\n* learn about coding back end in python (djang)\n* learn about coding in front end like bootstrap\n* document every research and finding as wiki\n* add detailed info and research in wikis\n* learn more about how to utilize zen hub extensively to track time\n\n" }, { "alpha_fraction": 0.38053098320961, "alphanum_fraction": 0.38053098320961, "avg_line_length": 10.199999809265137, "blob_id": "644e7ec8f4dd99470a0f508c599ca6ea3616ea47", "content_id": "bf09e529f6ecbe378b1fbd2497c212cdb2217097", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 113, "license_type": "permissive", "max_line_length": 17, "num_lines": 10, "path": "/server1/engine/core/constants.py", "repo_name": "NurErtem/Fall2016Swe573NurErtem", "src_encoding": "UTF-8", "text": "\nGENDERS = (\n\t('M', 'MALE'),\n\t('F', 'FEMALE'),\n)\n\nFEELINGS = (\n\t('G', 'Good'),\n\t('N', 'Normal'),\n\t('B', 'Bad')\n)\n" }, { "alpha_fraction": 0.7155796885490417, "alphanum_fraction": 0.7155796885490417, "avg_line_length": 31.47058868408203, "blob_id": "2dbbc07296a471f5681bbacda481385e088efeaa", "content_id": "47e0f9feca68434b1a79d4f0850ec04a83fe7259", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 552, "license_type": "permissive", "max_line_length": 79, "num_lines": 17, "path": "/server1/engine/nutrition/serializers.py", "repo_name": "NurErtem/Fall2016Swe573NurErtem", "src_encoding": 
"UTF-8", "text": "from rest_framework import serializers\n\nfrom nutrition.models import CalorieInput, CalorieOutput\n\n\nclass CalorieInputSerializer(serializers.ModelSerializer):\n\tclass Meta:\n\t\tmodel = CalorieInput\n\t\tfields = [\"date\", \"code\", \"name\", \"unit\", \"quantity\", \"calorie\", \"nutrients\"]\n\t\tread_only_fields = [\"id\", \"created_at\", \"updated_at\"]\n\n\nclass CalorieOutputSerializer(serializers.ModelSerializer):\n\tclass Meta:\n\t\tmodel = CalorieOutput\n\t\tfields = [\"date\", \"code\", \"name\", \"unit\", \"quantity\", \"calorie\"]\n\t\tread_only_fields = [\"id\", \"created_at\", \"updated_at\"]\n" }, { "alpha_fraction": 0.6854087710380554, "alphanum_fraction": 0.6940324306488037, "avg_line_length": 18.3266658782959, "blob_id": "5eb85161ba73ff54231f5af3edc0c111e9919213", "content_id": "3ac3aa0ce9a94db871d202efd03efa6319398db8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2899, "license_type": "permissive", "max_line_length": 65, "num_lines": 150, "path": "/server1/engine/core/views.py", "repo_name": "NurErtem/Fall2016Swe573NurErtem", "src_encoding": "UTF-8", "text": "from django.http import HttpResponse\nfrom django.shortcuts import render\nfrom django.conf import settings\nfrom django.template import RequestContext\nfrom rest_framework import generics\nimport requests\nimport json\nimport os\nimport sqlite3\n\nfrom core import data\nfrom core import menu\nfrom core.models import UserProfile\nfrom core.serializers import UserSerializer\nfrom nutrition.models import CalorieInput, CalorieOutput\n\nfrom core.forms import activitiesMade\nfrom core.serializers import EmbedSerializer\n\ndef dashboard(request):\n\tcards = data.get_cards()\n\tmenu_items = menu.get_menu_items('Dashboard')\n\n\treturn render(request, 'dashboard.html', {\n\t\t'menu_items': menu_items,\n\t\t'cards': cards\n\t})\n\n\ndef activities(request):\n\n\tcontext = RequestContext(request)\n\n\tmenu_items = menu.get_menu_items('Dashboard')\n\twith open(os.path.abspath('activities.json'), 'r') as data_file:\n\t\tactivities = json.load(data_file)\n\n\tif request.method == \"POST\":\n\n\t\tform = activitiesMade(request.POST)\n\n\t\tif form.is_valid():\n\t\t\tform.save()\n\n\t\t\treturn render(request, '5-activities.html', {\n\t\t\t\t'form': form,\n\t\t\t\t'menu_items': menu_items,\n\t\t\t\t'activities': activities\n\t\t\t})\n\t\telse:\n\t\t\tprint(form.errors)\n\telse:\n\t\tform = activitiesMade()\n\n\treturn render(request, '5-activities.html', {\n\t\t'form': form,\n\t\t'menu_items': menu_items,\n\t\t'activities': activities\n\t}, context)\n\n\n\ndef calories(request):\n\n\tcontext = RequestContext(request)\n\n\tmenu_items = menu.get_menu_items('Dashboard')\n\n\tif request.method == \"POST\":\n\n\t\tform = activitiesMade(request.POST)\n\n\t\tif form.is_valid():\n\t\t\tform.save()\n\n\t\t\treturn render(request, '7-consumption.html', {\n\t\t\t\t'form': form,\n\t\t\t\t'menu_items': menu_items,\n\t\t\t})\n\t\telse:\n\t\t\tprint(form.errors)\n\telse:\n\t\tform = activitiesMade()\n\n\treturn render(request, '7-consumption.html', {\n\t\t'form': form,\n\t\t'menu_items': menu_items,\n\t}, context)\n\n\n\ndef inout(request):\n\n\tmenu_items = menu.get_menu_items('Dashboard')\n\n\treturn render(request, '4-in-out.html', {\n\t\t'menu_items': menu_items\n\t})\n\n\ndef login(request):\n\n\tmenu_items = menu.get_menu_items('Dashboard')\n\n\treturn render(request, '2-login.html', {\n\t\t'menu_items': menu_items\n\t})\n\n\ndef 
mood(request):\n\n\tmenu_items = menu.get_menu_items('Dashboard')\n\n\treturn render(request, '8-mood.html', {\n\t\t'menu_items': menu_items\n\t})\n\n\ndef profile(request):\n\n\tcontext = RequestContext(request)\n\n\tmenu_items = menu.get_menu_items('Profile')\n\n\tprofile = {\n\t\t'name': 'Nur',\n\t\t'surname': 'Ertem Unden',\n\t\t'email': 'ertemnur@gmail.com',\n\t\t'description': 'some description',\n\t\t'age': '32',\n\t\t'birth': '24/04/1976',\n\t\t'bmi': 0,\n\t\t'weight': 70,\n\t\t'height': 175,\n\t}\n\n\treturn render(request, '3-userinfo.html', {\n\t\t'form': form,\n\t\t'profile': profile,\n\t\t'menu_items': menu_items\n\t}, context)\n\n\ndef settings(request):\n\treturn render(request, 'temp/__settings.html')\n\n\nclass UserProfileView(generics.ListCreateAPIView):\n\tqueryset = UserProfile.objects.all()\n\tserializer_class = UserSerializer\n" }, { "alpha_fraction": 0.6830010414123535, "alphanum_fraction": 0.6917877793312073, "avg_line_length": 20.598539352416992, "blob_id": "988081b5480f3b8f141a284b2093730e43b29207", "content_id": "d30106233a3b63f612ff28af790f8944e8d80390", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2959, "license_type": "permissive", "max_line_length": 95, "num_lines": 137, "path": "/server1/engine/core/_old_views.py", "repo_name": "NurErtem/Fall2016Swe573NurErtem", "src_encoding": "UTF-8", "text": "from django.http import HttpResponse\nfrom django.shortcuts import render\nfrom django.conf import settings\nfrom rest_framework import generics\nimport requests\nimport json\nimport os\nimport sqlite3\n\nfrom core import data\nfrom core import menu\nfrom core.models import UserProfile\nfrom core.serializers import UserSerializer\nfrom nutrition.models import CalorieInput, CalorieOutput\n\nfrom core.forms import SubmitEmbed\nfrom core.serializers import EmbedSerializer\n\ndef dashboard(request):\n\tcards = data.get_cards()\n\tmenu_items = menu.get_menu_items('Dashboard')\n\n\treturn render(request, 'dashboard.html', {\n\t\t'menu_items': menu_items,\n\t\t'cards': cards\n\t})\n\n\ndef activities(request):\n\n\tmenu_items = menu.get_menu_items('Dashboard')\n\twith open(os.path.abspath('activities.json'), 'r') as data_file:\n\t\tactivities = json.load(data_file)\n\n\tif request.method == \"POST\":\n'''\n\t\tconn = sqlite3.connect(os.path.abspath('data/db.sqlite3'))\n\t\tc = conn.cursor()\n'''\n\t\tform = SubmitEmbed(request.POST)\n\t\tif form.is_valid():\n\t\t\turl = form.cleaned_data['url']\n'''\n\t\t\tr = requests.get('http://api.embed.ly/1/oembed?key=' + settings.EMBEDLY_KEY + '&url=' + url)\n\t\t\tjsonized = r.json()\n\t\t\tserializer = EmbedSerializer(data=jsonized)\n\t\t\tif serializer.is_valid():\n\t\t\t\tembed = serializer.save()\n'''\n\t\t\t\tinstance = form.save(commit=False)\n\t\t\t\tinstance.saveform = activitiesMade.objects.get(title=offset)\n\t\t\t\tinstance.save()\n\n\t\t\t\treturn render(request, '5-activities.html', {\n\t\t\t\t\t'embed': embed,\n\t\t\t\t\t'menu_items': menu_items,\n\t\t\t\t\t'activities': activities\n\t\t\t\t})\n\telse:\n\t\tform = SubmitEmbed()\n\n\treturn render(request, '5-activities.html', {\n\t\t'form': form,\n\t\t'menu_items': menu_items,\n\t\t'activities': activities\n\t})\n\n\n\ndef inout(request):\n\n\tmenu_items = menu.get_menu_items('Dashboard')\n\n\treturn render(request, '4-in-out.html', {\n\t\t'menu_items': menu_items\n\t})\n\n\ndef login(request):\n\n\tmenu_items = menu.get_menu_items('Dashboard')\n\n\treturn render(request, '2-login.html', 
{\n\t\t'menu_items': menu_items\n\t})\n\n\ndef mood(request):\n\n\tmenu_items = menu.get_menu_items('Dashboard')\n\n\treturn render(request, '8-mood.html', {\n\t\t'menu_items': menu_items\n\t})\n\n\ndef timeline(request):\n\tmenu_items = menu.get_menu_items('Timeline')\n\n\tintakes = CalorieInput.objects.all()\n\touttakes = CalorieOutput.objects.all()\n\n\treturn render(request, 'timeline.html', {\n\t\t'menu_items': menu_items,\n\t\t'intakes': intakes,\n\t\t'outtakes': outtakes\n\t})\n\n\ndef profile(request):\n\tmenu_items = menu.get_menu_items('Profile')\n\n\tprofile = {\n\t\t'name': 'Nur',\n\t\t'surname': 'Ertem Unden',\n\t\t'email': 'ertemnur@gmail.com',\n\t\t'description': 'some description',\n\t\t'age': '32',\n\t\t'birth': '24/04/1976',\n\t\t'bmi': 0,\n\t\t'weight': 70,\n\t\t'height': 175,\n\n\t}\n\n\treturn render(request, '3-userinfo.html', {\n\t\t'profile': profile,\n\t\t'menu_items': menu_items\n\t})\n\ndef settings(request):\n\treturn render(request, 'temp/__settings.html')\n\n\nclass UserProfileView(generics.ListCreateAPIView):\n\tqueryset = UserProfile.objects.all()\n\tserializer_class = UserSerializer\n" }, { "alpha_fraction": 0.6893805265426636, "alphanum_fraction": 0.6973451375961304, "avg_line_length": 36.66666793823242, "blob_id": "87e1bf7871b4d5e0d5707a9e72ba3dba6369ac81", "content_id": "214d896c427556ceb678d22000fcdf5b01480c22", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1130, "license_type": "permissive", "max_line_length": 77, "num_lines": 30, "path": "/server1/engine/core/urls.py", "repo_name": "NurErtem/Fall2016Swe573NurErtem", "src_encoding": "UTF-8", "text": "\"\"\"KarmaCircle URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n\thttps://docs.djangoproject.com/en/1.10/topics/http/urls/\nExamples:\nFunction views\n\t1. Add an import: from my_app import views\n\t2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n\t1. Add an import: from other_app.views import Home\n\t2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n\t1. Import the include() function: from django.conf.urls import url, include\n\t2. 
Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url\nfrom core import views\n\napp_name = \"core\"\nurlpatterns = [\n\turl(r\"^dashboard/$\", views.dashboard, name='dashboard'),\n\turl(r\"^activities/$\", views.activities, name='activities'),\n\turl(r\"^inout/$\", views.inout, name='inout'),\n\turl(r\"^calories/$\", views.calories, name='calories'),\n\turl(r\"^mood/$\", views.mood, name='mood'),\n\turl(r\"^login/$\", views.login, name='login'),\n\turl(r\"^profile/$\", views.profile, name='profile'),\n\n\t# unreachable: this duplicates the pattern above, so the API view below is never routed\n\turl(r\"^profile/$\", views.UserProfileView.as_view()),\n]\n" }, { "alpha_fraction": 0.6948819160461426, "alphanum_fraction": 0.7066929340362549, "avg_line_length": 29.5, "blob_id": "e7b386256faea8961243b39804a97f0421a77f15", "content_id": "c4868e75f404ea34fc1f12cbd1737ebaccd85a92", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1524, "license_type": "permissive", "max_line_length": 77, "num_lines": 50, "path": "/server1/engine/nutrition/models.py", "repo_name": "NurErtem/Fall2016Swe573NurErtem", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.utils import timezone\n\nfrom core.models import RootModel\n\nclass CalorieInput(RootModel):\n\t#user = models.ForeignKey(UserProfile)\n\tdate = models.DateField(default=timezone.now)\n\tcode = models.CharField(max_length=10, null=True)\n\tname = models.CharField(max_length=100, null=True)\n\tunit = models.CharField(max_length=10, null=True)\n\tquantity = models.FloatField(null=False, default=0)\n\tcalorie = models.FloatField(null=False, default=0)\n\tnutrients = models.TextField(null=True)\n\n\tclass Meta:\n\t\tordering = ['date']\n\t\tverbose_name = 'calorieInput'\n\t\tverbose_name_plural = 'calorieInputList'\n\n\t@property\n\tdef full_name(self):\n\t\treturn '%s - %s' % (self.code, self.name)\n\n\tdef __str__(self):\n\t\tresult = [('%s=%s' % (key, value)) for key, value in self.__dict__.items()]\n\t\treturn result.__str__()\n\n\nclass CalorieOutput(RootModel):\n\t#user = models.ForeignKey(UserProfile)\n\tdate = models.DateField(default=timezone.now)\n\tcode = models.CharField(max_length=10, null=True)\n\tname = models.CharField(max_length=100, null=True)\n\tunit = models.CharField(max_length=10, null=True)\n\tquantity = models.FloatField(null=False, default=0)\n\tcalorie = models.FloatField(null=False, default=0)\n\n\tclass Meta:\n\t\tordering = ['date']\n\t\tverbose_name = 'calorieOutput'\n\t\tverbose_name_plural = 'calorieOutputList'\n\n\t@property\n\tdef full_name(self):\n\t\treturn '%s - %s' % (self.code, self.name)\n\n\tdef __str__(self):\n\t\tresult = [('%s=%s' % (key, value)) for key, value in self.__dict__.items()]\n\t\treturn result.__str__()" }, { "alpha_fraction": 0.7264999747276306, "alphanum_fraction": 0.7360000014305115, "avg_line_length": 28, "blob_id": "cd20d5e374723c97182eb7213d782166f9bd489e", "content_id": "46ffb2c10868d8549883ceb96ba79d2db09c4ccb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2000, "license_type": "permissive", "max_line_length": 85, "num_lines": 69, "path": "/server1/engine/core/models.py", "repo_name": "NurErtem/Fall2016Swe573NurErtem", "src_encoding": "UTF-8", "text": "from django.contrib.auth.models import User\nfrom django.db import models\nfrom django.utils import timezone\n\nfrom core import constants\n\n\nclass BaseModel(models.Model):\n\tid = models.AutoField(primary_key=True)\n\tis_active = 
models.BooleanField(default=True)\n\n\tclass Meta:\n\t\tabstract = True\n\n\n\nclass RootModel(BaseModel):\n\tcreate_date = models.DateTimeField(editable=False, null=False, default=timezone.now)\n\n\tclass Meta:\n\t\tabstract = True\n\n\nclass activitiesMade(RootModel):\n\tuserId = models.IntegerField(null=False, blank=False)\n\tcalories = models.IntegerField(null=True, blank=True)\n\tactivity = models.CharField(max_length=100)\n\tfor_date = models.DateField(null=True, blank=True, default=timezone.now)\n\tposted_on = models.DateTimeField(auto_now_add=True)\n\n\tclass Meta:\n\t\tordering = ['create_date']\n\n\nclass UserProfile(RootModel):\n\tuser = models.OneToOneField(User, on_delete=models.CASCADE)\n\n\tbirth_date = models.DateField(null=True, blank=True)\n\theight = models.IntegerField(null=True, blank=True)\n\tweight = models.DecimalField(max_digits=3, decimal_places=1, null=True)\n\tgender = models.CharField(max_length=1, choices=constants.GENDERS)\n\tfirst_name = models.CharField(max_length=100)\n\tlast_name = models.CharField(max_length=100)\n\n\tbmi = models.IntegerField(null=True, blank=True)\n\tbmr = models.IntegerField(null=True, blank=True)\n\n\tclass Meta:\n\t\tordering = ['create_date']\n\n\tdef __str__(self):\n\t\tresult = [('%s=%s' % (key, value)) for key, value in self.__dict__.items()]\n\t\treturn result.__str__()\n\n\nclass UserWeightHistory(RootModel):\n\tuser = models.ForeignKey(UserProfile)\n\tweight = models.DecimalField(max_digits=3, decimal_places=1)\n\theight = models.IntegerField(null=True, blank=True)\n\tbmi = models.DecimalField(max_digits=4, decimal_places=2)\n\tdate = models.DateField(default=timezone.now)\n\tnote = models.TextField(max_length=100)\n\n\tclass Meta:\n\t\tordering = ['date']\n\n\tdef __str__(self):\n\t\tresult = [('%s=%s' % (key, value)) for key, value in self.__dict__.items()]\n\t\treturn result.__str__()" }, { "alpha_fraction": 0.7977527976036072, "alphanum_fraction": 0.7977527976036072, "avg_line_length": 21.25, "blob_id": "752a680623099d13c619504778fb4bc68500129a", "content_id": "988b0a56c3a92675110c93843217d7947a3219f1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 89, "license_type": "permissive", "max_line_length": 33, "num_lines": 4, "path": "/server1/engine/nutrition/apps.py", "repo_name": "NurErtem/Fall2016Swe573NurErtem", "src_encoding": "UTF-8", "text": "from django.apps import AppConfig\n\nclass NutritionConfig(AppConfig):\n\tname = 'nutrition'\n" }, { "alpha_fraction": 0.7211018204689026, "alphanum_fraction": 0.7211018204689026, "avg_line_length": 21.58888816833496, "blob_id": "cca0a89f1f04dfc8918fe16f5e656279709987ab", "content_id": "2cf61a1d1829a2b97486e95c1b74b6f47c792f09", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2033, "license_type": "permissive", "max_line_length": 81, "num_lines": 90, "path": "/server1/engine/nutrition/views.py", "repo_name": "NurErtem/Fall2016Swe573NurErtem", "src_encoding": "UTF-8", "text": "from rest_framework.generics import ListCreateAPIView\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom nutrition.models import CalorieInput, CalorieOutput\nfrom nutrition.serializers import CalorieInputSerializer, CalorieOutputSerializer\nfrom nutrition.usda import UsdaFoodService\n\n\nclass CalorieInputListView(ListCreateAPIView):\n\tqueryset = CalorieInput.objects.all().order_by('date')\n\tserializer_class = 
CalorieInputSerializer\n\n\nclass CalorieOutputListView(ListCreateAPIView):\n\tqueryset = CalorieOutput.objects.all().order_by('date')\n\tserializer_class = CalorieOutputSerializer\n\n\nclass SearchFoodsView(APIView):\n\n\tdef get(self, request):\n\t\tname = request.query_params.get('name')\n\n\t\tfoods = []\n\n\t\tintakes = self.search_intakes(name)\n\t\tfor intake in intakes:\n\t\t\tfood = {\n\t\t\t\t\"type\": \"I\",\n\t\t\t\t\"code\": intake.code,\n\t\t\t\t\"name\": intake.name\n\t\t\t}\n\t\t\tfoods.append(food)\n\n\t\tusdas = self.search_usda(name)\n\t\tfor usda in usdas:\n\t\t\tfood = {\n\t\t\t\t\"type\": \"U\",\n\t\t\t\t\"code\": usda[\"ndbno\"],\n\t\t\t\t\"name\": usda[\"name\"]\n\t\t\t}\n\t\t\tfoods.append(food)\n\n\t\treturn Response(foods)\n\n\tdef search_intakes(self, name):\n\t\tintakes = CalorieInput.objects.filter(name__contains=name)\n\t\treturn list(intakes)\n\n\tdef search_recipes(self, name):\n\t\trecipes = []\n\t\treturn list(recipes)\n\n\tdef search_usda(self, name):\n\t\ttry:\n\t\t\tfoods = UsdaFoodService.search(name=name)\n\t\texcept KeyError:\n\t\t\tfoods = []\n\t\treturn foods\n\n\nclass GetFoodNutrientsView(APIView):\n\tdef get(self, request, code):\n\t\ttry:\n\t\t\tns = UsdaFoodService.get_nutrients(ndbno=code)\n\t\t\tnutrients = self.filter(ns)\n\t\texcept KeyError:\n\t\t\tnutrients = []\n\n\t\treturn Response(nutrients)\n\n\tdef filter(self, ns, apply_filter=False):\n\t\tif not apply_filter:\n\t\t\treturn ns\n\n\t\tVALID_NUTRIENTS = ['Energy', 'Protein', 'Calcium']\n\n\t\tnutrients = []\n\t\tfor n in ns:\n\t\t\tif n['name'] in VALID_NUTRIENTS:\n\t\t\t\tnutrients.append(n)\n\n\t\treturn nutrients\n\n\nclass GetFoodNutrientUnitsView(APIView):\n\tdef get(self, request, code):\n\t\tunits = UsdaFoodService.get_nutrient_units(ndbno=code)\n\t\treturn Response(units)\n" }, { "alpha_fraction": 0.5515151619911194, "alphanum_fraction": 0.5515151619911194, "avg_line_length": 26.58333396911621, "blob_id": "7c994b1e206dfe01180eb05dc76212800c125650", "content_id": "355669f4c13c98d1d384acdf70d101e928a616d7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 330, "license_type": "permissive", "max_line_length": 85, "num_lines": 12, "path": "/server1/engine/static/old/food/consumption-edit.html", "repo_name": "NurErtem/Fall2016Swe573NurErtem", "src_encoding": "UTF-8", "text": "{% extends \"template.html\" %}\n{% load crispy_forms_tags %}\n\n{% block content %}\n    <div class=\"form-group\">\n        <form action=\"{% url 'food:consumption-edit' %}\" method=\"post\" class=\"post-form\">\n            {% csrf_token %}\n            {% crispy form %}\n            <input type=\"submit\" value=\"Submit\" />\n        </form>\n    </div>\n{% endblock %}" }, { "alpha_fraction": 0.5295857787132263, "alphanum_fraction": 0.5325443744659424, "avg_line_length": 26.040000915527344, "blob_id": "94a2cee48c826ce5b2d6c94f9f83d17dcf54a63a", "content_id": "1de822824fb0a766639ecef05de685eb4ecca406", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 676, "license_type": "permissive", "max_line_length": 57, "num_lines": 25, "path": "/server1/engine/static/js/usda.js", "repo_name": "NurErtem/Fall2016Swe573NurErtem", "src_encoding": "UTF-8", "text": "\nfunction search_food(name) {\n    $.ajax({\n        type: 'GET',\n        url: '/energy/food/?name=' + name,\n        dataType: 'json',\n        contentType: 'application/json; charset=utf-8',\n        success: function(response) {\n            $('#lblData').html(JSON.stringify(response));\n        },\n        error: function(error) {\n            
console.log(error);\n        }\n    });\n}\n\nfunction get_nutrients(ndbno, callback) {\n    $.ajax({\n        type: 'GET',\n        url: '/energy/food/?ndbno=' + ndbno,\n        dataType: 'json',\n        contentType: 'application/json; charset=utf-8',\n        // pass the handlers themselves; invoking them here would reference undefined variables\n        success: callback.success,\n        error: callback.error\n    });\n}" }, { "alpha_fraction": 0.6893805265426636, "alphanum_fraction": 0.6973451375961304, "avg_line_length": 36.66666793823242, "blob_id": "87e1bf7871b4d5e0d5707a9e72ba3dba6369ac81", "content_id": "214d896c427556ceb678d22000fcdf5b01480c22", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1014, "license_type": "permissive", "max_line_length": 78, "num_lines": 26, "path": "/server1/engine/nutrition/urls.py", "repo_name": "NurErtem/Fall2016Swe573NurErtem", "src_encoding": "UTF-8", "text": "\"\"\"KarmaCircle URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n\thttps://docs.djangoproject.com/en/1.10/topics/http/urls/\nExamples:\nFunction views\n\t1. Add an import: from my_app import views\n\t2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n\t1. Add an import: from other_app.views import Home\n\t2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n\t1. Import the include() function: from django.conf.urls import url, include\n\t2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url\nfrom nutrition import views\n\nurlpatterns = [\n\turl(r\"^food/$\", views.SearchFoodsView.as_view()),\n\turl(r\"^food/(?P<code>.+)/$\", views.GetFoodNutrientsView.as_view()),\n\turl(r\"^food/(?P<code>.+)/units/$\", views.GetFoodNutrientUnitsView.as_view()),\n\n\turl(r\"^in/$\", views.CalorieInputListView.as_view()),\n\turl(r\"^out/$\", views.CalorieOutputListView.as_view()),\n]\n" }, { "alpha_fraction": 0.6805555820465088, "alphanum_fraction": 0.6865941882133484, "avg_line_length": 23.598539352416992, "blob_id": "9edbe1d8a5294cf40d57288031166a4ce438d0ad", "content_id": "212962c8e523055f1dd03af9cba8283f6d426c2e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1656, "license_type": "permissive", "max_line_length": 80, "num_lines": 68, "path": "/server1/engine/nutrition/usda.py", "repo_name": "NurErtem/Fall2016Swe573NurErtem", "src_encoding": "UTF-8", "text": "import requests\n\n\nclass UsdaFoodService(object):\n\n\t__api_base_url = 'http://api.nal.usda.gov/ndb/'\n\t__api_key = 't7dq28LzAEKWJUl3CcB2VIXPDvPBOlIVJYv0mKlW'\n\t__api_format = 'json'\n\n\t@staticmethod\n\tdef __get_api_url(command):\n\t\tapi_url = UsdaFoodService.__api_base_url + command + '/'\n\t\tapi_url = api_url + '?api_key=' + UsdaFoodService.__api_key + \\\n\t\t\t\t\t\t\t'&format=' + UsdaFoodService.__api_format\n\t\treturn api_url\n\n\t@staticmethod\n\tdef __get_report(ndbno):\n\t\turl = UsdaFoodService.__get_api_url('reports') + '&ndbno=' + ndbno + '&type=b'\n\t\tresponse = requests.get(url, allow_redirects=False)\n\n\t\treport = response.json()['report']['food']\n\t\treturn report\n\n\t@staticmethod\n\tdef __get_search(name):\n\t\turl = UsdaFoodService.__get_api_url('search') + '&q=' + name\n\t\tresponse = requests.get(url, allow_redirects=False)\n\n\t\titems = response.json()['list']\n\t\treturn items\n\n\t@staticmethod\n\tdef search(name):\n\t\tresponse = UsdaFoodService.__get_search(name)\n\t\tfoods = response['item']\n\t\treturn foods\n\n\t@staticmethod\n\tdef 
get_nutrients(ndbno):\n\t\tresponse = UsdaFoodService.__get_report(ndbno)\n\t\tnutrients = response['nutrients']\n\t\treturn nutrients\n\n\t@staticmethod\n\tdef get_nutrient_units(ndbno):\n\t\tnutrients = UsdaFoodService.get_nutrients(ndbno)\n\n\t\tunits = []\n\t\tfor nutrient in nutrients:\n\t\t\tprint(nutrient)\n\t\t\tfor measure in nutrient['measures']:\n\t\t\t\tunits.append(measure['label'])\n\n\t\treturn set(units)\n\nif __name__ == \"__main__\":\n\tfoods = UsdaFoodService.search('apple')\n\tfor food in foods:\n\t\tprint(food)\n\n\tnutrients = UsdaFoodService.get_nutrients('43529')\n\tfor nutrient in nutrients:\n\t\tprint(nutrient)\n\n\tmeasures = UsdaFoodService.get_nutrient_units('43529')\n\tfor measure in measures:\n\t\tprint(measure)" }, { "alpha_fraction": 0.7523573040962219, "alphanum_fraction": 0.7523573040962219, "avg_line_length": 30.984127044677734, "blob_id": "c3036cfe0f359d322341cddbc69d7555bb032e9d", "content_id": "8bbd9064ab759ebb9bad0d4c276b3eadaa23f28c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2015, "license_type": "permissive", "max_line_length": 79, "num_lines": 63, "path": "/server1/engine/core/serializers.py", "repo_name": "NurErtem/Fall2016Swe573NurErtem", "src_encoding": "UTF-8", "text": "from django.contrib.auth import update_session_auth_hash\nfrom django.contrib.auth.models import User\nfrom rest_framework import serializers\nfrom core.models import UserProfile\n\nfrom rest_framework import serializers\nfrom core.models import activitiesMade\n\nclass EmbedSerializer(serializers.ModelSerializer):\n\tclass Meta:\n\t\tmodel = activitiesMade\n\n\nclass UserProfileSerializer(serializers.ModelSerializer):\n\tclass Meta:\n\t\tmodel = UserProfile\n\t\tread_only_fields = ['created_at']\n\t\texclude = ['user', 'is_active', 'bmi']\n\n\nclass UserSerializer(serializers.ModelSerializer):\n\tprofile = UserProfileSerializer(required=False)\n\tpassword = serializers.CharField(write_only=True, required=False)\n\n\tclass Meta:\n\t\tmodel = User\n\t\tfields = [\n\t\t\t'email', 'first_name', 'last_name', 'password', 'profile'\n\t\t]\n\n\tdef create(self, validated_data):\n\t\tprofile_data = validated_data.pop('profile')\n\t\tvalidated_data['username'] = validated_data.get('email', None)\n\n\t\tuser = User.objects.create(**validated_data)\n\t\tprofile = UserProfile.objects.create(user=user, **profile_data)\n\t\treturn profile\n\n\tdef update(self, instance, validated_data):\n\t\tprofile_data = validated_data.pop('profile')\n\n\t\t# Update User data\n\t\tinstance.username = validated_data.get('email', instance.username)\n\t\tinstance.email = validated_data.get('email', instance.email)\n\t\tinstance.first_name = validated_data.get('first_name', instance.first_name)\n\t\tinstance.last_name = validated_data.get('last_name', instance.last_name)\n\n\t\t# Update UserProfile data\n\t\tif not instance.profile:\n\t\t\tUserProfile.objects.create(user=instance, **profile_data)\n\n\t\tinstance.profile.height = profile_data.get('height', instance.profile.height)\n\t\tinstance.profile.weight = profile_data.get('weight', instance.profile.weight)\n\t\tinstance.save()\n\n\t\t# Check if the password has changed\n\t\tpassword = validated_data.get('password', None)\n\t\tif password:\n\t\t\tinstance.set_password(password)\n\t\t\tinstance.save()\n\t\t\tupdate_session_auth_hash(self.context.get('request'), instance)\n\n\t\treturn instance\n" } ]
20
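Editor's note on the Karma Circle records above: UserProfile stores height, weight, bmi and bmr, and the dashboard cards display a BMI value, but no formula appears anywhere in this snapshot. A minimal sketch of how those derived fields could be computed; the Mifflin-St Jeor choice for BMR is an assumption, and both helper names are illustrative rather than part of the app:

def compute_bmi(weight_kg, height_cm):
    """BMI = weight in kilograms divided by height in metres squared."""
    height_m = height_cm / 100
    return round(weight_kg / (height_m ** 2), 2)


def compute_bmr(weight_kg, height_cm, age, gender):
    """Mifflin-St Jeor resting-energy estimate, using the app's 'M'/'F' gender codes."""
    base = 10 * weight_kg + 6.25 * height_cm - 5 * age
    return base + (5 if gender == 'M' else -161)


print(compute_bmi(70, 175))           # 22.86 for the sample profile data in views.py
print(compute_bmr(70, 175, 32, 'F'))  # 1472.75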
tschicke/Python-Animated-Exporter
https://github.com/tschicke/Python-Animated-Exporter
3c432e7f3c65835b823ed43bc2f245f4f02ef39a
62cf4b43f82e4894148934d2780b53246c48bf50
da8a57ac37139feed2230a566387095cf7c05a1b
refs/heads/master
2016-09-02T03:26:44.385487
2014-09-29T19:23:06
2014-09-29T19:23:06
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4038707911968231, "alphanum_fraction": 0.4211740493774414, "avg_line_length": 39.015384674072266, "blob_id": "b8defdaa079ca685ef55107de957a492b47cf6ef", "content_id": "ae3292baae0cb22a312a55421233825517b6c173", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7802, "license_type": "no_license", "max_line_length": 158, "num_lines": 195, "path": "/export_amesh.py", "repo_name": "tschicke/Python-Animated-Exporter", "src_encoding": "UTF-8", "text": "import bpy\n\ndef save(operator, context, filepath=\"\"):\n bpy.ops.object.mode_set(mode='OBJECT')\n \n vertices = []\n UVs = []\n normals = []\n boneIndices = []\n boneWeights = []\n \n skeletonData = []\n boneIndexLookup = []\n \n indices = []\n \n print(\"Exporting\")\n \n for object in bpy.data.objects:\n if object.type == \"ARMATURE\":\n armature = object.data\n \n startNodes = []\n \n for i in range(0, len(armature.bones)):\n bone = armature.bones[i]\n if bone.parent == None:\n dupIndex = -1\n for j in range(0, len(startNodes)):\n if bone.head[:] == startNodes[j][:3]:\n dupIndex = j\n break\n \n if dupIndex == -1: \n baseNode = bone.head_local\n index = i + len(startNodes)\n parentIndex = -1\n skeletonData.append((baseNode.x, baseNode.z, -baseNode.y, index, parentIndex))\n startNodes.append((baseNode.x, baseNode.y, baseNode.z, index, parentIndex))\n parentIndex = index\n index += 1\n else:\n parentIndex = dupIndex\n index = i + len(startNodes)\n \n tailOffset = bone.tail_local\n skeletonData.append((tailOffset.x, tailOffset.z, -tailOffset.y, index, parentIndex))\n else:\n offset = bone.tail_local\n index = i + len(startNodes)\n parentIndex = -1\n for j in range(0, i):\n if bone.parent == armature.bones[j]:\n parentIndex = j + len(startNodes)\n break\n skeletonData.append((offset.x, offset.z, -offset.y, index, parentIndex))\n print(len(startNodes))\n boneIndexLookup.append(i + len(startNodes))\n \n for object in bpy.data.objects:\n if object.type == 'MESH':\n mesh = object.data\n mesh.calc_tessface()\n \n tempBoneIndices = []\n tempBoneWeights = []\n \n for vert in mesh.vertices:\n weight1 = 0\n weight2 = 0\n index1 = 0\n index2 = 0\n if len(vert.groups) == 0:\n weight1 = 0\n weight2 = 0\n index1 = 0\n index2 = 0\n elif len(vert.groups) == 1:\n weight1 = 0 if vert.groups[0].weight < 0.001 else 1\n weight2 = 0\n index1 = vert.groups[0].group\n index2 = 0\n elif len(vert.groups) == 2:\n weight1 = vert.groups[0].weight\n weight1 = 0 if weight1 < 0.001 else weight1\n weight2 = vert.groups[1].weight\n weight2 = 0 if weight2 < 0.001 else weight2\n index1 = vert.groups[0].group\n index2 = vert.groups[1].group\n else:\n for group in vert.groups:\n if weight1 > weight2:\n if group.weight > weight2:\n weight2 = group.weight\n index2 = group.group\n else:\n if group.weight > weight1:\n weight1 = group.weight\n index1 = group.group\n weight1 = 0 if weight1 < 0.001 else weight1\n weight2 = 0 if weight2 < 0.001 else weight2\n \n #Fix this so that deleting a bone won't shift indices\n if index1 >= len(boneIndexLookup):\n index1 = 0\n weight1 = 0\n if index2 >= len(boneIndexLookup):\n index2 = 0\n weight2 = 0\n divideBy = weight1 + weight2\n if divideBy != 0:\n weight1 /= divideBy\n weight2 /= divideBy\n tempBoneIndices.append((boneIndexLookup[index1], boneIndexLookup[index2]))\n tempBoneWeights.append((weight1, weight2))\n \n indexOffset = 0\n for face in mesh.tessfaces:\n tempIndices = []\n for i in range(0, len(face.vertices)):\n index = face.vertices[i]\n blenderVertex = 
mesh.vertices[index].co[:]\n outVertex = (blenderVertex[0], blenderVertex[2], -blenderVertex[1])\n UV = mesh.tessface_uv_textures.active.data[face.index].uv[i][:]\n if face.use_smooth:\n blenderNormal = mesh.vertices[index].normal[:]\n outNormal = (blenderNormal[0], blenderNormal[2], -blenderNormal[1])\n else:\n blenderNormal = face.normal[:]\n outNormal = (blenderNormal[0], blenderNormal[2], -blenderNormal[1])\n boneIndex = tempBoneIndices[index]\n boneWeight = tempBoneWeights[index]\n \n dupIndex = -1\n for j in range(0, len(vertices)):\n tempVert = vertices[j]\n tempUV = UVs[j]\n tempNormal = normals[j]\n tempBoneIndex = boneIndices[j]\n tempBoneWeight = boneWeights[j]\n \n if outVertex == tempVert and UV == tempUV and outNormal == tempNormal and boneIndex == tempBoneIndex and boneWeight == tempBoneWeight:\n #Duplicate Found\n dupIndex = j\n break\n if dupIndex != -1:\n #Duplicate Found\n tempIndices.append(dupIndex)\n else:\n vertices.append(outVertex)\n UVs.append(UV)\n normals.append(outNormal)\n boneIndices.append(boneIndex)\n boneWeights.append(boneWeight)\n tempIndices.append(indexOffset)\n indexOffset += 1\n \n if len(tempIndices) == 4:\n indices.append((tempIndices[0], tempIndices[1], tempIndices[2]))\n indices.append((tempIndices[0], tempIndices[2], tempIndices[3]))\n else:\n indices.append((tempIndices[0], tempIndices[1], tempIndices[2]))\n \n print(filepath)\n file = open(filepath, 'w')\n fw = file.write\n fw(\"amdl\\n\")\n fw(\"%i %i %i\\n\" % (len(vertices), len(indices) * 3, len(skeletonData)))\n \n for v in vertices:\n fw(\"v %f %f %f\\n\" % v[:])\n \n for uv in UVs:\n fw(\"t %f %f\\n\" % uv[:])\n \n for n in normals:\n fw(\"n %f %f %f\\n\" % n[:])\n \n for bi in boneIndices:\n fw(\"b %i %i\\n\" % bi[:])\n \n for bw in boneWeights:\n fw(\"w %f %f\\n\" % bw[:])\n \n for i in indices:\n fw(\"i %i %i %i\\n\" % i[:])\n \n for node in skeletonData:\n fw(\"s %f %f %f %i %i\\n\" % node[:])\n \n file.close\n \n print(\"Finished Exporting\")\n \n return {'FINISHED'}" }, { "alpha_fraction": 0.5408450961112976, "alphanum_fraction": 0.5492957830429077, "avg_line_length": 25.649999618530273, "blob_id": "bc62bf4bc746aef4027eb8b5a3556547ba4f84d6", "content_id": "cc3681e0a0803f0959a9246cd0bf88fe798cd50d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1065, "license_type": "no_license", "max_line_length": 138, "num_lines": 40, "path": "/test.py", "repo_name": "tschicke/Python-Animated-Exporter", "src_encoding": "UTF-8", "text": "import os\n\n\ndef clear():\n #os.system('cls' if os.name == 'nt' else 'clear')\n for i in range(0, 30):\n print()\n\n\nimport bpy\n\nmesh = bpy.data.objects['Cube'].data\nskeleton = bpy.data.objects['Armature'].data\n\ndef printFunc():\n bpy.ops.object.mode_set(mode='OBJECT')\n clear()\n mesh.calc_tessface()\n \n file = open(\"C:\\\\Users\\\\Tyler\\\\AppData\\\\Roaming\\\\Blender Foundation\\\\Blender\\\\2.70\\\\scripts\\\\addons\\\\io_mesh_animated\\\\test.txt\", 'w')\n fw = file.write\n \n for vertex in mesh.vertices:\n print(\"Coord \", vertex.co[0])\n for group in vertex.groups:\n print(\"Tail Local \", skeleton.bones[group.group].tail_local[0])\n \n for i in range(0, len(skeleton.bones)):\n bone = skeleton.bones[i]\n fw(\"%i\\n\" % i)\n fw(bone.name + \"\\n\")\n fw(\"head %f %f %f\\n\" % bone.head[:])\n fw(\"head local %f %f %f\\n\" % bone.head_local[:])\n fw(\"tail %f %f %f\\n\" % bone.tail[:])\n fw(\"tail local %f %f %f\\n\" % bone.tail_local[:])\n \n file.close()\n \n\nprintFunc()" } ]
2
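Editor's note on the exporter above: export_amesh.py deduplicates vertices by scanning every previously emitted vertex for each new one, so a mesh with n unique vertices costs O(n^2) comparisons. A sketch of the usual dictionary-based alternative, keyed on the same attribute tuples the script already compares; the names below are illustrative, not part of the script:

def dedup_index(key, index_of, out_lists):
    """Return the index assigned to `key`, appending its fields once on first sight.

    key       -- hashable tuple of per-vertex fields (position, UV, normal, bone data)
    index_of  -- dict mapping key -> assigned index
    out_lists -- parallel output lists, one per field in `key`
    """
    if key not in index_of:
        index_of[key] = len(index_of)
        for out, field in zip(out_lists, key):
            out.append(field)
    return index_of[key]


index_of, vertices, uvs = {}, [], []
for key in [((0, 0, 0), (0, 0)), ((1, 0, 0), (0, 1)), ((0, 0, 0), (0, 0))]:
    print(dedup_index(key, index_of, (vertices, uvs)))  # prints 0, 1, 0

Because the key includes UVs, normals and bone data, this keeps the exporter's rule that a vertex splits whenever any attribute differs, while each lookup drops from a linear scan to a constant-time dictionary hit.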
aahuerta1/Memory-Puzzle
https://github.com/aahuerta1/Memory-Puzzle
717ba8f31157c68a45eb0fe7bf3ab09c6de0622a
4d6b6ebeff8bdf30b2d93f5b2666a58a62a8571f
6da1b55aec35a12f16068836e8f5aa137967d270
refs/heads/master
2022-12-10T08:30:19.755881
2022-12-01T01:06:25
2022-12-01T01:06:25
251,460,764
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.5825610756874084, "alphanum_fraction": 0.6089113354682922, "avg_line_length": 36.88298034667969, "blob_id": "199673c56cc745a6531fc8673fb656f54e7edef2", "content_id": "75daef645bf0e821f3e9618131144c02347679cc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 21366, "license_type": "no_license", "max_line_length": 170, "num_lines": 564, "path": "/Game/MemoryPuzzle.py", "repo_name": "aahuerta1/Memory-Puzzle", "src_encoding": "UTF-8", "text": "import random,pygame, sys, time\n\nfrom pygame import font\nfrom pygame.locals import * # puts a limited set of constants and functions into the global namespace of our script\nfrom pygame.mixer import pause\n\n### ====================================================================================================\n# if I improve the game I should make it so that every time that I win a game it gets gradually harder.\n### ====================================================================================================\n\npygame.mixer.pre_init(44100, 16, 2, 4090) # sets up a mixer which is what python uses for sound\npygame.init() # initializes pygame\n\nFPS = 30 # frames per second, the general speed of the program\ndisplayWidth = 640 # size of window's width in pixels\ndisplayHeight = 480 # size of windows' height in pixels\nbox_reveal_spd = 8 # speed boxes' sliding reveals and covers\nbox_size = 40 # size of box height & width in pixels\ngap = 10 # size of gap between boxes in pixels\ntile_width = 4 # number of columns of icons 6\ntile_height = 4 # number of rows of icons 5\n\n# ----------------------- Make Game Screen -------------------------\ngameDisplay = pygame.display.set_mode((displayWidth, displayHeight),\n pygame.FULLSCREEN) # This represents the window that opens up (GUI)\nclock = pygame.time.Clock() # This is what helps us define time in the game\n\nfullscreen = False\npause = False\npygame.display.set_caption('Memory Puzzle Game') # Displays the name of the game (top right)\n\n# ----------------------- Play Background Music --------------------\n# this doesnt work\n# pygame.mixer.music.load(\"Tokyo Daylight\") # add in the music file\n# pygame.mixer.music.set_volume(0.15) # min 0-1 max\n# pygame.mixer.music.play(-1) # the -1 means to loop endlessly\n\nassert (tile_width * tile_height) % 2 == 0, 'Board needs to have an even number of boxes for pairs of matches.'\nx_margin = int((displayWidth - (tile_width * (box_size + gap))) / 2)\ny_margin = int((displayHeight - (tile_height * (box_size + gap))) / 2)\n\n# R G B\nGRAY = (100, 100, 100)\nDARKBLUE = (0, 0, 100)\nWHITE = (255, 255, 255)\nRED = (200, 0, 0)\nGREEN = (0, 150, 0)\nPINK = (255, 182, 193)\nYELLOW = (255, 255, 0)\nORANGE = (255, 128, 0)\nPURPLE = (255, 0, 255)\nCYAN = (0, 255, 255)\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\nBLUE = (0, 0, 255)\nLIGHTBLUE = (60, 60, 100)\nBLUEGREY = (27, 55, 82)\nMUSTARDYELLOW = (206, 161, 8)\nLIGHTYELLOW = (255, 215, 68)\nPINKPEACH = (255, 197, 192)\nLIGHTPEACH = (255, 141, 152)\n\n# Button Color Effects\nBRIGHTRED = (255, 0, 0)\nBRIGHTGREEN = (0, 255, 0)\nLIGHTGREY = (211, 211, 211)\nHOTPINK = (255, 105, 180)\n\nbackgroud_color = BLUEGREY\nlightBGcolor = GRAY\nbox_color = WHITE\nhighlight_color = PINK\n\ndonut = 'donut'\nsquare = 'square'\ndiamond = 'diamond'\nlines = 'lines'\noval = 'oval'\n\nall_colors = (RED, GREEN, PINK, YELLOW, ORANGE, PURPLE, CYAN, PINK, WHITE, BLACK, BLUE)\nall_shapes = (donut, square, diamond, lines, oval)\nassert len(all_colors) * 
len(all_shapes) * 2 >= tile_width * tile_height, \\\n \"Board is too big for the number of shapes/colors defined.\"\n\n\ndef text_objects(text, font):\n textSurface = font.render(text, True, BLACK)\n return textSurface, textSurface.get_rect()\n\n\ndef messageDisplay(text):\n largeText = pygame.font.Font('freesansbold.ttf', 115)\n textSurf, textRect = text_objects(\"Memory Puzzle Game\", largeText)\n textRect.center = ((displayWidth / 2), (displayHeight / 2))\n gameDisplay.blit(textSurf, textRect)\n\n pygame.display.update() # updates texts on our display\n\n time.sleep(2)\n\n main()\n\n\ndef button(msg, x, y, w, h, inactiveColor, activeColor, fontSize, eventAction=None):\n mouse = pygame.mouse.get_pos() # gets the position of the mouse\n click = pygame.mouse.get_pressed()\n\n if x + w > mouse[0] > x and y + h > mouse[1] > y: # Causes Highlight of BUTTON\n # X, Y, Width, Height\n pygame.draw.rect(gameDisplay, activeColor, (x, y, w, h))\n if click[0] == 1 and eventAction != None:\n eventAction()\n else:\n pygame.draw.rect(gameDisplay, inactiveColor, (x, y, w, h))\n\n smallText = pygame.font.Font(\"freesansbold.ttf\", fontSize)\n textSurf, textRect = text_objects(msg, smallText)\n textRect.center = ((x + (w / 2)), (y + (h / 2)))\n gameDisplay.blit(textSurf, textRect)\n\n\ndef blit_text(surface, text, pos, font,\n color=pygame.Color('black')): # allows me to have multiple limes show up in my code.\n words = [word.split(' ') for word in text.splitlines()] # 2D array where each row is a list of words.\n space = font.size(' ')[0] # The width of a space.\n max_width, max_height = surface.get_size()\n x, y = pos\n for line in words:\n for word in line:\n word_surface = font.render(word, 0, color)\n word_width, word_height = word_surface.get_size()\n if x + word_width >= max_width:\n x = pos[0] # Reset the x.\n y += word_height # Start on new row.\n surface.blit(word_surface, (x, y))\n x += word_width + space\n x = pos[0] # Reset the x.\n y += word_height # Start on new row.\n\n\ntutorial_text = \"--- Game Tutorial: --- \\n1. Click start to start the game (game will beign right away). \" \\\n \"\\n\\n2. The tiles will appear and uncover 5 at a time. ( so make sure to pay attention to where you see matching pairs)\" \\\n \"\\n\\n3. Click a tile and it will uncover its symbol, to find its pair click a different tile. (MUST MATCH)\" \\\n \"\\n\\n4. If the player was successful in finding a pair, the images will stay exposed. If the player did not, the tiles will revert back to being covered.\" \\\n \"\\n\\n5. Once the player has successfully finished the puzzle a winning animation will play. However, shortly after the game will restart. \" \\\n \"\\n\\nTO MOVE TO THE NEXT PAGE CLICK ANYWHERE ON THE SCREEN OR CLICK THE NEXT BUTTON \" \\\n \"\\n\\n--- WARNING: No data will be saved! ---\"\n\ntutorial_text2 = \"--- Game Tutorial (Continued) --- \\nThere will be two buttons available during your game play one being \\'Go Back\\' and the other being \\'Tutorial\\'.\" \\\n \"\\n\\nClicking \\'Go Back\\' will take you back to the title page. YOUR PROGRESS WILL BE DELETED. 
\" \\\n \"\\n\\nOn the other hand clicking on Tutorial will take you to the game\\'s Tutorial and SAVE YOUR PROGRESS IN THE GAME\" \\\n \"\\n\\nThe player also has the option of pressing the key \\'P\\' this will pause the GAME as well as the MUSIC, until you either click \\'Unpause\\' or \\'Restart\\'\" \\\n \"\\n\\nThe player may also exit the game when they\\'re in the paused screen\" \\\n\ntutorial_text3 = \"--- Settings Page: --- \\n\\n1. The player can customize the music of the game (ON/OFF). \" \\\n \"\\n\\n2. Once you selected what you wish to set your music to you may click the \\'Go Back\\' button to return to the title screen. \" \\\n \"\\n\\nPlease note that when you click on the \\'Music On\\' button the music will play again from the beginning\"\\\n \"\\n\\n--- Quit Button: --- \\n\\n1. Once the player does not want to play anymore, the player can click the quit button and will be exited out of the game.\"\n\npause_text = \"Press \\'P\\' to pause the game\"\n\nfont = pygame.font.Font(\"freesansbold.ttf\", 20)\n\n\ndef tutorial_page():\n display_instructions = True\n instruction_page = 1\n\n # -------- Tutorial Page Loop -----------\n while display_instructions:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n if event.type == pygame.MOUSEBUTTONDOWN:\n instruction_page += 1\n if instruction_page == 4:\n display_instructions = False\n\n # Set the screen background\n gameDisplay.fill(GRAY)\n\n if instruction_page == 1:\n # Draw instructions, page 1\n button(\"Next Page\", 450, 410, 150, 50, GREEN, BRIGHTGREEN, 20)\n blit_text(gameDisplay, tutorial_text, (20, 20), font)\n\n if instruction_page == 2:\n # Draw instructions, page 2\n button(\"Next Page\", 450, 410, 150, 50, GREEN, BRIGHTGREEN, 20)\n blit_text(gameDisplay, tutorial_text2, (20, 20), font)\n\n if instruction_page == 3:\n # Draw instructions, page 2\n button(\"Done\", 450, 410, 150, 50, RED, BRIGHTRED, 20)\n blit_text(gameDisplay, tutorial_text3, (20, 20), font)\n\n # Go ahead and update the screen with what we've drawn.\n pygame.display.update()\n # Limit to 60 frames per second\n clock.tick(60)\n\n\ndef unpaused():\n global pause\n pygame.mixer.music.unpause()\n\n pause = False\n\n\ndef paused():\n pygame.mixer.music.pause()\n\n while pause:\n for event in pygame.event.get():\n\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n\n gameDisplay.fill(WHITE)\n\n largeText = pygame.font.SysFont(\"comicsansms\", 115)\n TextSurf, TextRect = text_objects(\"Paused\", largeText)\n TextRect.center = ((displayWidth / 2), (displayHeight / 2))\n gameDisplay.blit(TextSurf, TextRect)\n\n button(\"Unpause\", 50, 340, 100, 50, GREEN, BRIGHTGREEN, 18, unpaused)\n\n button(\"Restart\", 225, 325, 200, 75, PINKPEACH, LIGHTPEACH, 40, main)\n\n button(\"Exit Game\", 500, 340, 100, 50, RED, BRIGHTRED, 18, quitgame)\n\n pygame.display.update()\n clock.tick(15)\n\ndef music_on():\n pygame.mixer.music.play()\n\n\ndef music_off():\n pygame.mixer.music.stop()\n\n\ndef settings():\n global gameDisplay\n settings_page = True\n\n while settings_page:\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n quit()\n\n gameDisplay.fill(backgroud_color)\n\n button(\"Music On\", 150, 150, 150, 50, GRAY, LIGHTGREY, 20, music_on)\n button(\"Music Off\", 350, 150, 150, 50, GRAY, LIGHTGREY, 20, music_off)\n button(\"Go Back\", 250, 250, 150, 50, RED, BRIGHTRED, 20, game_Intro)\n\n pygame.display.update()\n clock.tick(60)\n\n\ndef quitgame():\n pygame.quit()\n quit()\n\n\ndef 
game_Intro():\n intro = True\n\n while intro:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n\n gameDisplay.fill(backgroud_color)\n largeText = pygame.font.Font('freesansbold.ttf', 58)\n TextSurf, TextRect = text_objects(\"Memory Puzzle Game\", largeText)\n TextRect.center = ((displayWidth / 2), (displayHeight / 2))\n gameDisplay.blit(TextSurf, TextRect)\n\n # PLAY BUTTON\n button(\"Start!\", 50, 340, 100, 50, GREEN, BRIGHTGREEN, 20, main)\n\n # Tutorial Button\n button(\"Tutorial\", 225, 325, 200, 75, PINKPEACH, LIGHTPEACH, 40, tutorial_page)\n\n # Settings Button\n button(\"Settings\", 500, 25, 100, 75, MUSTARDYELLOW, LIGHTYELLOW, 20, settings)\n\n # QUIT BUTTON\n button(\"Quit!\", 500, 340, 100, 50, RED, BRIGHTRED, 20, quitgame)\n\n pygame.display.update()\n clock.tick(15)\n\n\ndef main():\n pygame.mixer.music.unpause()\n\n global clock, gameDisplay, pause\n\n mousex = 0 # used to store x coordinate of mouse event\n mousey = 0 # used to store y coordinate of mouse event\n\n mainBoard = getRandomizedBoard()\n revealedBoxes = generateRevealedBoxesData(False)\n\n firstSelection = None # stores the (x, y) of the first box clicked.\n\n gameDisplay.fill(backgroud_color)\n startGameAnimation(mainBoard)\n\n while True: # main game loop\n mouseClicked = False\n\n gameDisplay.fill(backgroud_color) # drawing the window\n drawBoard(mainBoard, revealedBoxes)\n\n for event in pygame.event.get(): # event handling loop\n if event.type == QUIT or (event.type == KEYUP and event.key == K_ESCAPE):\n pygame.quit()\n sys.exit()\n elif event.type == MOUSEMOTION:\n mousex, mousey = event.pos\n elif event.type == MOUSEBUTTONUP:\n mousex, mousey = event.pos\n mouseClicked = True\n if event.type == pygame.KEYDOWN: # Pauses Game\n if event.key == pygame.K_p:\n pause = True\n paused()\n\n boxx, boxy = getBoxAtPixel(mousex, mousey)\n if boxx != None and boxy != None:\n # The mouse is currently over a box.\n if not revealedBoxes[boxx][boxy]:\n drawHighlightBox(boxx, boxy)\n if not revealedBoxes[boxx][boxy] and mouseClicked:\n revealBoxesAnimation(mainBoard, [(boxx, boxy)])\n revealedBoxes[boxx][boxy] = True # set the box as \"revealed\"\n if firstSelection == None: # the current box was the first box clicked\n firstSelection = (boxx, boxy)\n else: # the current box was the second box clicked\n # Check if there is a match between the two icons.\n icon1shape, icon1color = getShapeAndColor(mainBoard, firstSelection[0], firstSelection[1])\n icon2shape, icon2color = getShapeAndColor(mainBoard, boxx, boxy)\n\n if icon1shape != icon2shape or icon1color != icon2color:\n # Icons don't match. 
Re-cover up both selections.\n pygame.time.wait(1000) # 1000 milliseconds = 1 sec\n coverBoxesAnimation(mainBoard, [(firstSelection[0], firstSelection[1]), (boxx, boxy)])\n revealedBoxes[firstSelection[0]][firstSelection[1]] = False\n revealedBoxes[boxx][boxy] = False\n elif wonGame(revealedBoxes): # check if all pairs found\n winningAnimation(mainBoard)\n pygame.time.wait(2000)\n\n # Reset the board\n mainBoard = getRandomizedBoard()\n revealedBoxes = generateRevealedBoxesData(False)\n\n # Show the fully unrevealed board for a second.\n drawBoard(mainBoard, revealedBoxes)\n pygame.display.update()\n pygame.time.wait(1000)\n\n # Replay the start game animation.\n startGameAnimation(mainBoard)\n firstSelection = None # reset firstSelection variable\n\n button(\"Instructions\", 500, 25, 125, 75, PINKPEACH, HOTPINK, 20, tutorial_page)\n button(\"Go Back\", 30, 25, 100, 75, RED, BRIGHTRED, 20, game_Intro)\n blit_text(gameDisplay, pause_text, (180, 45), font)\n\n # Redraw the screen and wait a clock tick.\n pygame.display.update()\n clock.tick(FPS)\n\n\ndef generateRevealedBoxesData(val):\n revealedBoxes = []\n for i in range(tile_width):\n revealedBoxes.append([val] * tile_height)\n return revealedBoxes\n\n\ndef getRandomizedBoard():\n # Get a list of every possible shape in every possible color.\n icons = []\n for color in all_colors:\n for shape in all_shapes:\n icons.append((shape, color))\n\n random.shuffle(icons) # randomize the order of the icons list\n numIconsUsed = int(tile_width * tile_height / 2) # calculate how many icons are needed\n icons = icons[:numIconsUsed] * 2 # make two of each\n random.shuffle(icons)\n\n # Create the board data structure, with randomly placed icons.\n board = []\n for x in range(tile_width):\n column = []\n for y in range(tile_height):\n column.append(icons[0])\n del icons[0] # remove the icons as we assign them\n board.append(column)\n return board\n\n\ndef splitIntoGroupsOf(groupSize, theList):\n # splits a list into a list of lists, where the inner lists have at\n # most groupSize number of items.\n result = []\n for i in range(0, len(theList), groupSize):\n result.append(theList[i:i + groupSize])\n return result\n\n\ndef leftTopCoordsOfBox(boxx, boxy):\n # Convert board coordinates to pixel coordinates\n left = boxx * (box_size + gap) + x_margin\n top = boxy * (box_size + gap) + y_margin\n return (left, top)\n\n\ndef getBoxAtPixel(x, y):\n for boxx in range(tile_width):\n for boxy in range(tile_height):\n left, top = leftTopCoordsOfBox(boxx, boxy)\n boxRect = pygame.Rect(left, top, box_size, box_size)\n if boxRect.collidepoint(x, y):\n return (boxx, boxy)\n return (None, None)\n\n\ndef drawIcon(shape, color, boxx, boxy):\n quarter = int(box_size * 0.25) # syntactic sugar\n half = int(box_size * 0.5) # syntactic sugar\n\n left, top = leftTopCoordsOfBox(boxx, boxy) # get pixel coords from board coords\n # Draw the shapes\n if shape == donut:\n pygame.draw.circle(gameDisplay, color, (left + half, top + half), half - 5)\n pygame.draw.circle(gameDisplay, backgroud_color, (left + half, top + half), quarter - 5)\n elif shape == square:\n pygame.draw.rect(gameDisplay, color, (left + quarter, top + quarter, box_size - half, box_size - half))\n elif shape == diamond:\n pygame.draw.polygon(gameDisplay, color, (\n (left + half, top), (left + box_size - 1, top + half), (left + half, top + box_size - 1),\n (left, top + half)))\n elif shape == lines:\n for i in range(0, box_size, 4):\n pygame.draw.line(gameDisplay, color, (left, top + i), (left + i, top))\n 
pygame.draw.line(gameDisplay, color, (left + i, top + box_size - 1), (left + box_size - 1, top + i))\n elif shape == oval:\n pygame.draw.ellipse(gameDisplay, color, (left, top + quarter, box_size, half))\n\n\ndef getShapeAndColor(board, boxx, boxy):\n # shape value for x, y spot is stored in board[x][y][0]\n # color value for x, y spot is stored in board[x][y][1]\n return board[boxx][boxy][0], board[boxx][boxy][1]\n\n\ndef drawBoxCovers(board, boxes, coverage):\n # Draws boxes being covered/revealed. \"boxes\" is a list\n # of two-item lists, which have the x & y spot of the box.\n for box in boxes:\n left, top = leftTopCoordsOfBox(box[0], box[1])\n pygame.draw.rect(gameDisplay, backgroud_color, (left, top, box_size, box_size))\n shape, color = getShapeAndColor(board, box[0], box[1])\n drawIcon(shape, color, box[0], box[1])\n if coverage > 0: # only draw the cover if there is any coverage\n pygame.draw.rect(gameDisplay, box_color, (left, top, coverage, box_size))\n pygame.display.update()\n clock.tick(FPS)\n\n\ndef revealBoxesAnimation(board, boxesToReveal):\n # Do the \"box reveal\" animation.\n for coverage in range(box_size, (-box_reveal_spd) - 150, -box_reveal_spd):\n drawBoxCovers(board, boxesToReveal, coverage)\n\n\ndef coverBoxesAnimation(board, boxesToCover):\n # Do the \"box cover\" animation.\n for coverage in range(0, box_size + box_reveal_spd, box_reveal_spd):\n drawBoxCovers(board, boxesToCover, coverage)\n\n\ndef drawBoard(board, revealed):\n # Draws all of the boxes in their covered or revealed state.\n for boxx in range(tile_width):\n for boxy in range(tile_height):\n left, top = leftTopCoordsOfBox(boxx, boxy)\n if not revealed[boxx][boxy]:\n # Draw a covered box.\n pygame.draw.rect(gameDisplay, box_color, (left, top, box_size, box_size))\n else:\n # Draw the (revealed) icon.\n shape, color = getShapeAndColor(board, boxx, boxy)\n drawIcon(shape, color, boxx, boxy)\n\n\ndef drawHighlightBox(boxx, boxy):\n left, top = leftTopCoordsOfBox(boxx, boxy)\n pygame.draw.rect(gameDisplay, highlight_color, (left - 5, top - 5, box_size + 10, box_size + 10), 4)\n\n\ndef startGameAnimation(board):\n # Randomly reveal the boxes 5 at a time.\n coveredBoxes = generateRevealedBoxesData(False)\n boxes = []\n for x in range(tile_width):\n for y in range(tile_height):\n boxes.append((x, y))\n random.shuffle(boxes)\n boxGroups = splitIntoGroupsOf(5, boxes)\n\n drawBoard(board, coveredBoxes)\n for boxGroup in boxGroups:\n revealBoxesAnimation(board, boxGroup)\n coverBoxesAnimation(board, boxGroup)\n\n\ndef winningAnimation(board):\n # flash the background color when the player has won\n coveredBoxes = generateRevealedBoxesData(True)\n color1 = lightBGcolor\n color2 = backgroud_color\n\n for i in range(13):\n pygame.time.wait(150) # Causes the Winning text to blink\n\n largeText = pygame.font.SysFont('comicsansms', 115)\n textSurf, textRect = text_objects(\"Winner!!\", largeText)\n textRect.center = ((displayWidth / 2), (displayHeight / 2))\n gameDisplay.blit(textSurf, textRect)\n\n pygame.display.update()\n\n color1, color2 = color2, color1 # swap colors\n gameDisplay.fill(color1)\n drawBoard(board, coveredBoxes)\n\n pygame.time.wait(300)\n pygame.display.update()\n\n\ndef wonGame(revealedBoxes):\n # Returns True if all the boxes have been revealed, otherwise False\n for i in revealedBoxes:\n if False in i:\n return False # return False if any boxes are covered.\n return True\n\n\ngame_Intro()\nmain()\npygame.quit()\nquit()\n" }, { "alpha_fraction": 0.7830188870429993, "alphanum_fraction": 
0.7830188870429993, "avg_line_length": 41.599998474121094, "blob_id": "3967a75c76a1539f90064d30ac508e735ecb4f68", "content_id": "f21db35122b7232e7488ac6dfc4906db71bff1b4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 212, "license_type": "no_license", "max_line_length": 85, "num_lines": 5, "path": "/README.md", "repo_name": "aahuerta1/Memory-Puzzle", "src_encoding": "UTF-8", "text": "# Memory-Puzzle\nMaking a memory puzzle game using PyCharm in Python.\n\n#!Please be sure to update everything and comment on any part you changed (reasoning).\nDon't forget to push your work so that we can all see it." } ]
2
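A note on the pairing logic in the Memory-Puzzle source above: getRandomizedBoard() shuffles the full (shape, color) icon list, keeps tile_width * tile_height / 2 of them, and doubles that slice, so every icon placed on the board occurs exactly twice. That invariant is what lets wonGame() detect a finished game by simply sweeping for any still-covered box. Below is a minimal standalone sketch of the same idea; the names WIDTH, HEIGHT, and ICONS are illustrative placeholders, not identifiers from the repository.

import random
from collections import Counter

WIDTH, HEIGHT = 4, 3  # placeholder board size; the cell count must be even
ICONS = [(shape, color) for shape in ('donut', 'square')
         for color in ('red', 'green', 'blue')]

# Loosely mirrors the assert at the top of this section: enough distinct
# icons must exist to fill the board with pairs.
assert len(ICONS) * 2 >= WIDTH * HEIGHT, 'Board is too big for the icons defined.'

random.shuffle(ICONS)
needed = WIDTH * HEIGHT // 2   # one icon per pair of cells
deck = ICONS[:needed] * 2      # "make two of each", as the source comment says
random.shuffle(deck)
board = [[deck.pop() for _ in range(HEIGHT)] for _ in range(WIDTH)]

# The invariant wonGame() relies on: every icon occurs exactly twice.
counts = Counter(icon for column in board for icon in column)
assert all(n == 2 for n in counts.values())

One consequence worth noting: because icons[:numIconsUsed] * 2 always yields an even number of icons, a board whose width times height is odd would exhaust the deck one cell early, so the width and height constants defined earlier in the file have to multiply to an even number.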
viathus/Image-object-detection-application
https://github.com/viathus/Image-object-detection-application
39f8349a74b86c3ce6c939fd58f869b45868b6d7
142e59fbf913775e2fc8f59f3b824e1bd532ac56
92b95684236f6c9cbf8de0f5ec73c891d1d5b6ce
refs/heads/master
2021-09-08T19:54:47.873689
2021-09-07T22:14:16
2021-09-07T22:14:16
183,115,866
4
3
null
null
null
null
null
[ { "alpha_fraction": 0.6170212626457214, "alphanum_fraction": 0.6566771864891052, "avg_line_length": 40.987674713134766, "blob_id": "d91f74d07948143e3c09e66fe9593f8c06248cfb", "content_id": "ab6392bad49d1d71942338468b8bc100e0daba24", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 78371, "license_type": "no_license", "max_line_length": 195, "num_lines": 1866, "path": "/designs/design.py", "repo_name": "viathus/Image-object-detection-application", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'loginwindow.ui'\n#\n# Created by: PyQt5 UI code generator 5.12.1\n#\n# WARNING! All changes made in this file will be lost!\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\n\n\nclass Ui_MainWindow(object):\n def setupUi(self, MainWindow):\n MainWindow.setObjectName(\"MainWindow\")\n MainWindow.resize(1219, 856)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(MainWindow.sizePolicy().hasHeightForWidth())\n MainWindow.setSizePolicy(sizePolicy)\n font = QtGui.QFont()\n font.setFamily(\"MS Sans Serif\")\n font.setPointSize(3)\n MainWindow.setFont(font)\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(\"./images/detection.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n MainWindow.setWindowIcon(icon)\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setObjectName(\"centralwidget\")\n self.stackedWidget = QtWidgets.QStackedWidget(self.centralwidget)\n self.stackedWidget.setGeometry(QtCore.QRect(-10, 0, 1231, 841))\n font = QtGui.QFont()\n font.setFamily(\"Segoe WP Semibold\")\n font.setBold(True)\n font.setWeight(75)\n self.stackedWidget.setFont(font)\n self.stackedWidget.setStyleSheet(\"QStackedWidget#login_page{\\n\"\n\"background-color: #464646;\\n\"\n\"}\")\n self.stackedWidget.setObjectName(\"stackedWidget\")\n self.first_page = QtWidgets.QWidget()\n self.first_page.setStyleSheet(\"QWidget#first_page{\\n\"\n\"background-color: #464646;\\n\"\n\"}\")\n self.first_page.setObjectName(\"first_page\")\n self.menuBox = QtWidgets.QGroupBox(self.first_page)\n self.menuBox.setGeometry(QtCore.QRect(10, 0, 271, 841))\n self.menuBox.setStyleSheet(\"QGroupBox{\\n\"\n\"border: none;\\n\"\n\"background-color: #2f2f2f;\\n\"\n\"}\")\n self.menuBox.setTitle(\"\")\n self.menuBox.setObjectName(\"menuBox\")\n self.usernotifications = QtWidgets.QPushButton(self.menuBox)\n self.usernotifications.setGeometry(QtCore.QRect(0, 130, 271, 41))\n font = QtGui.QFont()\n font.setFamily(\"Segoe UI Semibold\")\n font.setPointSize(10)\n font.setBold(False)\n font.setWeight(50)\n self.usernotifications.setFont(font)\n self.usernotifications.setStyleSheet(\"QPushButton#usernotifications{\\n\"\n\" background-color: Transparent;\\n\"\n\" background-repeat:no-repeat;\\n\"\n\"color: white;\\n\"\n\" border: none;\\n\"\n\"\\n\"\n\" outline:none;\\n\"\n\" Text-align:left;\\n\"\n\"padding-left: 20px;\\n\"\n\"}\\n\"\n\"QPushButton:hover#usernotifications{\\n\"\n\" background-color: #5e5e5e;\\n\"\n\"}\")\n icon1 = QtGui.QIcon()\n icon1.addPixmap(QtGui.QPixmap(\"./images/notification.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.usernotifications.setIcon(icon1)\n self.usernotifications.setIconSize(QtCore.QSize(23, 23))\n self.usernotifications.setObjectName(\"usernotifications\")\n self.webcamDetect = QtWidgets.QPushButton(self.menuBox)\n 
self.webcamDetect.setGeometry(QtCore.QRect(0, 180, 271, 41))\n font = QtGui.QFont()\n font.setFamily(\"Segoe WP Semibold\")\n font.setPointSize(10)\n font.setBold(False)\n font.setWeight(50)\n self.webcamDetect.setFont(font)\n self.webcamDetect.setStyleSheet(\"QPushButton#webcamDetect{\\n\"\n\" background-color: Transparent;\\n\"\n\" background-repeat:no-repeat;\\n\"\n\" border: none;\\n\"\n\"\\n\"\n\"color: white;\\n\"\n\" outline:none;\\n\"\n\" Text-align:left;\\n\"\n\"padding-left: 20px;\\n\"\n\"}\\n\"\n\"QPushButton:hover#webcamDetect{\\n\"\n\" background-color: #5e5e5e;\\n\"\n\"}\")\n icon2 = QtGui.QIcon()\n icon2.addPixmap(QtGui.QPixmap(\"./images/webcamdetect.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.webcamDetect.setIcon(icon2)\n self.webcamDetect.setIconSize(QtCore.QSize(22, 22))\n self.webcamDetect.setObjectName(\"webcamDetect\")\n self.detectimage = QtWidgets.QPushButton(self.menuBox)\n self.detectimage.setGeometry(QtCore.QRect(0, 230, 271, 41))\n font = QtGui.QFont()\n font.setFamily(\"Segoe WP Semibold\")\n font.setPointSize(10)\n font.setBold(False)\n font.setWeight(50)\n self.detectimage.setFont(font)\n self.detectimage.setStyleSheet(\"QPushButton#detectimage{\\n\"\n\" background-color: Transparent;\\n\"\n\" background-repeat:no-repeat;\\n\"\n\" border: none;\\n\"\n\" cursor:pointer;\\n\"\n\"color: white;\\n\"\n\" Text-align:left;\\n\"\n\"padding-left: 20px;\\n\"\n\"}\\n\"\n\"QPushButton:hover#detectimage{\\n\"\n\" background-color: #5e5e5e;\\n\"\n\"\\n\"\n\"}\")\n icon3 = QtGui.QIcon()\n icon3.addPixmap(QtGui.QPixmap(\"./images/Dslr-Camera-icon.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.detectimage.setIcon(icon3)\n self.detectimage.setIconSize(QtCore.QSize(23, 23))\n self.detectimage.setObjectName(\"detectimage\")\n self.detectvideo = QtWidgets.QPushButton(self.menuBox)\n self.detectvideo.setGeometry(QtCore.QRect(0, 280, 271, 41))\n font = QtGui.QFont()\n font.setFamily(\"Segoe UI Semibold\")\n font.setPointSize(10)\n font.setBold(False)\n font.setWeight(50)\n self.detectvideo.setFont(font)\n self.detectvideo.setStyleSheet(\"QPushButton#detectvideo{\\n\"\n\" background-color: Transparent;\\n\"\n\" background-repeat:no-repeat;\\n\"\n\" border: none;\\n\"\n\" cursor:pointer;\\n\"\n\"color: white;\\n\"\n\" Text-align:left;\\n\"\n\"padding-left: 20px;\\n\"\n\"}\\n\"\n\"QPushButton:hover#detectvideo{\\n\"\n\" background-color: #5e5e5e;\\n\"\n\"\\n\"\n\"}\")\n icon4 = QtGui.QIcon()\n icon4.addPixmap(QtGui.QPixmap(\"./images/folder-red-video-icon.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.detectvideo.setIcon(icon4)\n self.detectvideo.setIconSize(QtCore.QSize(25, 25))\n self.detectvideo.setObjectName(\"detectvideo\")\n self.userSettings = QtWidgets.QPushButton(self.menuBox)\n self.userSettings.setGeometry(QtCore.QRect(0, 330, 271, 41))\n font = QtGui.QFont()\n font.setFamily(\"Segoe UI Semibold\")\n font.setPointSize(10)\n font.setBold(False)\n font.setWeight(50)\n self.userSettings.setFont(font)\n self.userSettings.setStyleSheet(\"QPushButton#userSettings{\\n\"\n\" background-color: Transparent;\\n\"\n\" background-repeat:no-repeat;\\n\"\n\" border: none;\\n\"\n\"color: white;\\n\"\n\" outline:none;\\n\"\n\" Text-align:left;\\n\"\n\"padding-left: 22px;\\n\"\n\"}\\n\"\n\"QPushButton:hover#userSettings{\\n\"\n\" background-color: #5e5e5e;\\n\"\n\"}\")\n icon5 = QtGui.QIcon()\n icon5.addPixmap(QtGui.QPixmap(\"./images/settings.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.userSettings.setIcon(icon5)\n self.userSettings.setIconSize(QtCore.QSize(23, 
23))\n self.userSettings.setObjectName(\"userSettings\")\n self.logout = QtWidgets.QPushButton(self.menuBox)\n self.logout.setGeometry(QtCore.QRect(0, 380, 271, 41))\n font = QtGui.QFont()\n font.setFamily(\"Segoe UI Semibold\")\n font.setPointSize(10)\n font.setBold(False)\n font.setWeight(50)\n self.logout.setFont(font)\n self.logout.setStyleSheet(\"QPushButton#logout{\\n\"\n\" background-color: Transparent;\\n\"\n\" background-repeat:no-repeat;\\n\"\n\" border: none;\\n\"\n\" outline:none;\\n\"\n\" Text-align:left;\\n\"\n\"color: white;\\n\"\n\"padding-left: 18px;\\n\"\n\"}\\n\"\n\"QPushButton:hover#logout{\\n\"\n\" background-color: #5e5e5e;\\n\"\n\"}\")\n icon6 = QtGui.QIcon()\n icon6.addPixmap(QtGui.QPixmap(\"./images/exit.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.logout.setIcon(icon6)\n self.logout.setIconSize(QtCore.QSize(30, 30))\n self.logout.setObjectName(\"logout\")\n self.smokeTitle = QtWidgets.QPushButton(self.menuBox)\n self.smokeTitle.setGeometry(QtCore.QRect(10, 20, 251, 41))\n font = QtGui.QFont()\n font.setFamily(\"Segoe UI Semibold\")\n font.setPointSize(15)\n font.setBold(True)\n font.setWeight(75)\n self.smokeTitle.setFont(font)\n self.smokeTitle.setStyleSheet(\"QPushButton#smokeTitle{\\n\"\n\" background-color: Transparent;\\n\"\n\" background-repeat:no-repeat;\\n\"\n\" color: white;\\n\"\n\" border: none;\\n\"\n\" outline:none;\\n\"\n\" Text-align:left;\\n\"\n\" padding-left: 20px;\\n\"\n\"}\\n\"\n\"\")\n self.smokeTitle.setIcon(icon)\n self.smokeTitle.setIconSize(QtCore.QSize(25, 25))\n self.smokeTitle.setObjectName(\"smokeTitle\")\n self.line_2 = QtWidgets.QFrame(self.menuBox)\n self.line_2.setGeometry(QtCore.QRect(20, 168, 221, 16))\n self.line_2.setFrameShape(QtWidgets.QFrame.HLine)\n self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.line_2.setObjectName(\"line_2\")\n self.line_3 = QtWidgets.QFrame(self.menuBox)\n self.line_3.setGeometry(QtCore.QRect(20, 218, 221, 16))\n self.line_3.setFrameShape(QtWidgets.QFrame.HLine)\n self.line_3.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.line_3.setObjectName(\"line_3\")\n self.line_4 = QtWidgets.QFrame(self.menuBox)\n self.line_4.setGeometry(QtCore.QRect(20, 268, 221, 16))\n self.line_4.setFrameShape(QtWidgets.QFrame.HLine)\n self.line_4.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.line_4.setObjectName(\"line_4\")\n self.line_5 = QtWidgets.QFrame(self.menuBox)\n self.line_5.setGeometry(QtCore.QRect(20, 318, 221, 16))\n self.line_5.setFrameShape(QtWidgets.QFrame.HLine)\n self.line_5.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.line_5.setObjectName(\"line_5\")\n self.line_6 = QtWidgets.QFrame(self.menuBox)\n self.line_6.setGeometry(QtCore.QRect(20, 368, 221, 16))\n self.line_6.setFrameShape(QtWidgets.QFrame.HLine)\n self.line_6.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.line_6.setObjectName(\"line_6\")\n self.stackedWidget_4 = QtWidgets.QStackedWidget(self.menuBox)\n self.stackedWidget_4.setGeometry(QtCore.QRect(10, 440, 251, 381))\n self.stackedWidget_4.setStyleSheet(\"QStackedWidget#stackedWidget_4{\\n\"\n\"border: none;\\n\"\n\"background-color: #2f2f2f;\\n\"\n\"}\")\n self.stackedWidget_4.setObjectName(\"stackedWidget_4\")\n self.page_4 = QtWidgets.QWidget()\n self.page_4.setObjectName(\"page_4\")\n self.stackedWidget_4.addWidget(self.page_4)\n self.page_5 = QtWidgets.QWidget()\n self.page_5.setObjectName(\"page_5\")\n self.label_34 = QtWidgets.QLabel(self.page_5)\n self.label_34.setGeometry(QtCore.QRect(0, 20, 251, 21))\n font = QtGui.QFont()\n font.setFamily(\"Segoe WP Semibold\")\n 
font.setPointSize(13)\n font.setBold(True)\n font.setWeight(75)\n self.label_34.setFont(font)\n self.label_34.setStyleSheet(\"QLabel#label_34{\\n\"\n\" color: white;\\n\"\n\"}\")\n self.label_34.setAlignment(QtCore.Qt.AlignCenter)\n self.label_34.setObjectName(\"label_34\")\n self.label_35 = QtWidgets.QLabel(self.page_5)\n self.label_35.setGeometry(QtCore.QRect(10, 80, 201, 21))\n font = QtGui.QFont()\n font.setFamily(\"Segoe UI Semibold\")\n font.setPointSize(12)\n font.setBold(True)\n font.setWeight(75)\n self.label_35.setFont(font)\n self.label_35.setStyleSheet(\"QLabel{\\n\"\n\"color: white;\\n\"\n\"}\")\n self.label_35.setObjectName(\"label_35\")\n self.checkBox = QtWidgets.QCheckBox(self.page_5)\n self.checkBox.setGeometry(QtCore.QRect(30, 100, 101, 21))\n font = QtGui.QFont()\n font.setFamily(\"Segoe UI Semibold\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n self.checkBox.setFont(font)\n self.checkBox.setStyleSheet(\"QCheckBox\\n\"\n\"{\\n\"\n\"color: white;\\n\"\n\"}\")\n self.checkBox.setObjectName(\"checkBox\")\n self.checkBox_2 = QtWidgets.QCheckBox(self.page_5)\n self.checkBox_2.setGeometry(QtCore.QRect(30, 120, 91, 21))\n font = QtGui.QFont()\n font.setFamily(\"Segoe UI Semibold\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n self.checkBox_2.setFont(font)\n self.checkBox_2.setStyleSheet(\"QCheckBox{\\n\"\n\"color: white;\\n\"\n\"}\")\n self.checkBox_2.setObjectName(\"checkBox_2\")\n self.checkBox_3 = QtWidgets.QCheckBox(self.page_5)\n self.checkBox_3.setGeometry(QtCore.QRect(30, 140, 101, 21))\n font = QtGui.QFont()\n font.setFamily(\"Segoe UI Semibold\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n self.checkBox_3.setFont(font)\n self.checkBox_3.setStyleSheet(\"QCheckBox{\\n\"\n\"color: white;\\n\"\n\"}\")\n self.checkBox_3.setObjectName(\"checkBox_3\")\n self.label_36 = QtWidgets.QLabel(self.page_5)\n self.label_36.setGeometry(QtCore.QRect(10, 190, 241, 21))\n font = QtGui.QFont()\n font.setFamily(\"Segoe WP Semibold\")\n font.setPointSize(12)\n font.setBold(True)\n font.setWeight(75)\n self.label_36.setFont(font)\n self.label_36.setStyleSheet(\"QLabel{\\n\"\n\"color: white;\\n\"\n\"}\")\n self.label_36.setObjectName(\"label_36\")\n self.dateEdit_2 = QtWidgets.QDateEdit(self.page_5)\n self.dateEdit_2.setGeometry(QtCore.QRect(10, 220, 101, 21))\n font = QtGui.QFont()\n font.setFamily(\"Segoe WP Semibold\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n self.dateEdit_2.setFont(font)\n self.dateEdit_2.setObjectName(\"dateEdit_2\")\n self.dateEdit_3 = QtWidgets.QDateEdit(self.page_5)\n self.dateEdit_3.setGeometry(QtCore.QRect(150, 220, 101, 21))\n font = QtGui.QFont()\n font.setFamily(\"Segoe WP Semibold\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n self.dateEdit_3.setFont(font)\n self.dateEdit_3.setObjectName(\"dateEdit_3\")\n self.label_37 = QtWidgets.QLabel(self.page_5)\n self.label_37.setGeometry(QtCore.QRect(110, 220, 31, 21))\n font = QtGui.QFont()\n font.setFamily(\"Segoe WP Semibold\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n self.label_37.setFont(font)\n self.label_37.setStyleSheet(\"QLabel{\\n\"\n\"color: white;\\n\"\n\"}\")\n self.label_37.setAlignment(QtCore.Qt.AlignCenter)\n self.label_37.setObjectName(\"label_37\")\n self.filterbtn = QtWidgets.QPushButton(self.page_5)\n self.filterbtn.setGeometry(QtCore.QRect(30, 330, 191, 31))\n font = QtGui.QFont()\n font.setFamily(\"Segoe UI Semibold\")\n font.setPointSize(10)\n 
font.setBold(True)\n font.setWeight(75)\n self.filterbtn.setFont(font)\n self.filterbtn.setStyleSheet(\"QPushButton#filterbtn{\\n\"\n\" background-color: Transparent;\\n\"\n\" background-repeat:no-repeat;\\n\"\n\" border: 2px solid white;\\n\"\n\" color: white;\\n\"\n\"border-radius:8px;\\n\"\n\" \\n\"\n\"}\\n\"\n\"\\n\"\n\"QPushButton:hover#filterbtn{\\n\"\n\" background-color: white;\\n\"\n\" border-color: white;\\n\"\n\" color: black;\\n\"\n\"border-radius: 8px;\\n\"\n\" border: 2px solid white;\\n\"\n\"}\")\n self.filterbtn.setObjectName(\"filterbtn\")\n self.label_38 = QtWidgets.QLabel(self.page_5)\n self.label_38.setGeometry(QtCore.QRect(10, 250, 241, 21))\n font = QtGui.QFont()\n font.setFamily(\"Segoe WP Semibold\")\n font.setPointSize(9)\n font.setBold(True)\n font.setWeight(75)\n self.label_38.setFont(font)\n self.label_38.setStyleSheet(\"QLabel{\\n\"\n\"color: white;\\n\"\n\"}\")\n self.label_38.setObjectName(\"label_38\")\n self.stackedWidget_4.addWidget(self.page_5)\n self.line_9 = QtWidgets.QFrame(self.menuBox)\n self.line_9.setGeometry(QtCore.QRect(20, 118, 221, 16))\n self.line_9.setFrameShape(QtWidgets.QFrame.HLine)\n self.line_9.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.line_9.setObjectName(\"line_9\")\n self.homebutton = QtWidgets.QPushButton(self.menuBox)\n self.homebutton.setGeometry(QtCore.QRect(0, 80, 271, 41))\n font = QtGui.QFont()\n font.setFamily(\"Segoe UI Semibold\")\n font.setPointSize(10)\n font.setBold(False)\n font.setWeight(50)\n self.homebutton.setFont(font)\n self.homebutton.setStyleSheet(\"QPushButton#homebutton{\\n\"\n\" background-color: Transparent;\\n\"\n\" background-repeat:no-repeat;\\n\"\n\"color: white;\\n\"\n\" border: none;\\n\"\n\"\\n\"\n\" outline:none;\\n\"\n\" Text-align:left;\\n\"\n\"padding-left: 20px;\\n\"\n\"}\\n\"\n\"QPushButton:hover#homebutton{\\n\"\n\" background-color: #5e5e5e;\\n\"\n\"}\")\n icon7 = QtGui.QIcon()\n icon7.addPixmap(QtGui.QPixmap(\"./images/house.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.homebutton.setIcon(icon7)\n self.homebutton.setIconSize(QtCore.QSize(23, 23))\n self.homebutton.setObjectName(\"homebutton\")\n self.stackedWidget_2 = QtWidgets.QStackedWidget(self.first_page)\n self.stackedWidget_2.setGeometry(QtCore.QRect(283, 10, 941, 821))\n font = QtGui.QFont()\n font.setFamily(\"Segoe UI Semibold\")\n font.setPointSize(9)\n font.setBold(True)\n font.setWeight(75)\n self.stackedWidget_2.setFont(font)\n self.stackedWidget_2.setStyleSheet(\"QStackedWidget#stackedWidget_2{\\n\"\n\"background-color: #464646;\\n\"\n\"}\")\n self.stackedWidget_2.setObjectName(\"stackedWidget_2\")\n self.setting_page = QtWidgets.QWidget()\n self.setting_page.setObjectName(\"setting_page\")\n self.changeUserInfobtn = QtWidgets.QPushButton(self.setting_page)\n self.changeUserInfobtn.setGeometry(QtCore.QRect(90, 90, 161, 31))\n font = QtGui.QFont()\n font.setFamily(\"Segoe WP Semibold\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n self.changeUserInfobtn.setFont(font)\n self.changeUserInfobtn.setStyleSheet(\"QPushButton#changeUserInfobtn{\\n\"\n\" background-color: Transparent;\\n\"\n\" background-repeat:no-repeat;\\n\"\n\" border: none;\\n\"\n\" color: white;\\n\"\n\" outline:none;\\n\"\n\" \\n\"\n\"}\\n\"\n\"QPushButton:hover#changeUserInfobtn{\\n\"\n\" text-decoration: underline;\\n\"\n\" \\n\"\n\"}\")\n self.changeUserInfobtn.setObjectName(\"changeUserInfobtn\")\n self.Changepasswordbtn = QtWidgets.QPushButton(self.setting_page)\n self.Changepasswordbtn.setGeometry(QtCore.QRect(270, 90, 161, 
31))\n font = QtGui.QFont()\n font.setFamily(\"Segoe UI Semibold\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n self.Changepasswordbtn.setFont(font)\n self.Changepasswordbtn.setStyleSheet(\"QPushButton#Changepasswordbtn{\\n\"\n\" background-color: Transparent;\\n\"\n\" background-repeat:no-repeat;\\n\"\n\" border: none;\\n\"\n\" color: white;\\n\"\n\" outline:none;\\n\"\n\" \\n\"\n\"}\\n\"\n\"QPushButton:hover#Changepasswordbtn{\\n\"\n\" text-decoration: underline;\\n\"\n\" \\n\"\n\"}\")\n self.Changepasswordbtn.setObjectName(\"Changepasswordbtn\")\n self.stackedWidget_3 = QtWidgets.QStackedWidget(self.setting_page)\n self.stackedWidget_3.setGeometry(QtCore.QRect(10, 150, 581, 391))\n self.stackedWidget_3.setStyleSheet(\"QStackedWidget#stackedWidget_3{\\n\"\n\"background-color: #464646;\\n\"\n\"}\")\n self.stackedWidget_3.setObjectName(\"stackedWidget_3\")\n self.page = QtWidgets.QWidget()\n self.page.setObjectName(\"page\")\n self.lineEdit_5 = QtWidgets.QLineEdit(self.page)\n self.lineEdit_5.setGeometry(QtCore.QRect(50, 160, 241, 31))\n self.lineEdit_5.setStyleSheet(\"QLineEdit#lineEdit_5{\\n\"\n\"border-radius:8px;\\n\"\n\"padding-left: 5px;\\n\"\n\"}\")\n self.lineEdit_5.setText(\"\")\n self.lineEdit_5.setObjectName(\"lineEdit_5\")\n self.label_28 = QtWidgets.QLabel(self.page)\n self.label_28.setGeometry(QtCore.QRect(50, 140, 91, 21))\n font = QtGui.QFont()\n font.setFamily(\"Segoe UI Semibold\")\n font.setPointSize(9)\n font.setBold(True)\n font.setWeight(75)\n self.label_28.setFont(font)\n self.label_28.setStyleSheet(\"QLabel#label_28{\\n\"\n\"color: white;\\n\"\n\"}\")\n self.label_28.setObjectName(\"label_28\")\n self.label_16 = QtWidgets.QLabel(self.page)\n self.label_16.setGeometry(QtCore.QRect(50, 20, 91, 21))\n font = QtGui.QFont()\n font.setFamily(\"Segoe WP Semibold\")\n font.setPointSize(9)\n font.setBold(True)\n font.setWeight(75)\n self.label_16.setFont(font)\n self.label_16.setStyleSheet(\"QLabel#label_16{\\n\"\n\"color: white;\\n\"\n\"}\")\n self.label_16.setObjectName(\"label_16\")\n self.lineEdit_3 = QtWidgets.QLineEdit(self.page)\n self.lineEdit_3.setGeometry(QtCore.QRect(50, 40, 241, 31))\n self.lineEdit_3.setStyleSheet(\"QLineEdit#lineEdit_3{\\n\"\n\"border-radius:8px;\\n\"\n\"padding-left: 5px;\\n\"\n\"}\")\n self.lineEdit_3.setObjectName(\"lineEdit_3\")\n self.lineEdit_4 = QtWidgets.QLineEdit(self.page)\n self.lineEdit_4.setGeometry(QtCore.QRect(50, 100, 241, 31))\n self.lineEdit_4.setStyleSheet(\"QLineEdit#lineEdit_4{\\n\"\n\"border-radius:8px;\\n\"\n\"padding-left: 5px;\\n\"\n\"}\")\n self.lineEdit_4.setObjectName(\"lineEdit_4\")\n self.label_22 = QtWidgets.QLabel(self.page)\n self.label_22.setGeometry(QtCore.QRect(50, 80, 91, 21))\n font = QtGui.QFont()\n font.setFamily(\"Segoe UI Semibold\")\n font.setPointSize(9)\n font.setBold(True)\n font.setWeight(75)\n self.label_22.setFont(font)\n self.label_22.setStyleSheet(\"QLabel#label_22{\\n\"\n\"color: white;\\n\"\n\"}\")\n self.label_22.setObjectName(\"label_22\")\n self.saveuserinfobtn = QtWidgets.QPushButton(self.page)\n self.saveuserinfobtn.setGeometry(QtCore.QRect(250, 260, 161, 31))\n font = QtGui.QFont()\n font.setFamily(\"Segoe UI Semibold\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n self.saveuserinfobtn.setFont(font)\n self.saveuserinfobtn.setStyleSheet(\"QPushButton#saveuserinfobtn{\\n\"\n\" background-color: Transparent;\\n\"\n\" background-repeat:no-repeat;\\n\"\n\" border: 2px solid white;\\n\"\n\" color: white;\\n\"\n\"border-radius:8px;\\n\"\n\" 
\\n\"\n\"}\\n\"\n\"\\n\"\n\"QPushButton:hover#saveuserinfobtn{\\n\"\n\" background-color: white;\\n\"\n\" border-color: white;\\n\"\n\" color: black;\\n\"\n\"border-radius: 8px;\\n\"\n\" border: 2px solid white;\\n\"\n\"}\")\n self.saveuserinfobtn.setObjectName(\"saveuserinfobtn\")\n self.stackedWidget_3.addWidget(self.page)\n self.page_2 = QtWidgets.QWidget()\n self.page_2.setObjectName(\"page_2\")\n self.label_29 = QtWidgets.QLabel(self.page_2)\n self.label_29.setGeometry(QtCore.QRect(50, 20, 121, 16))\n font = QtGui.QFont()\n font.setFamily(\"Segoe UI Semibold\")\n font.setPointSize(9)\n font.setBold(True)\n font.setWeight(75)\n self.label_29.setFont(font)\n self.label_29.setStyleSheet(\"QLabel#label_29{\\n\"\n\"color: white;\\n\"\n\"}\")\n self.label_29.setObjectName(\"label_29\")\n self.label_30 = QtWidgets.QLabel(self.page_2)\n self.label_30.setGeometry(QtCore.QRect(50, 80, 141, 21))\n font = QtGui.QFont()\n font.setFamily(\"Segoe WP Semibold\")\n font.setPointSize(9)\n font.setBold(True)\n font.setWeight(75)\n self.label_30.setFont(font)\n self.label_30.setStyleSheet(\"QLabel#label_30{\\n\"\n\"color: white;\\n\"\n\"}\")\n self.label_30.setObjectName(\"label_30\")\n self.label_31 = QtWidgets.QLabel(self.page_2)\n self.label_31.setGeometry(QtCore.QRect(50, 140, 141, 21))\n font = QtGui.QFont()\n font.setFamily(\"Segoe WP Semibold\")\n font.setPointSize(9)\n font.setBold(True)\n font.setWeight(75)\n self.label_31.setFont(font)\n self.label_31.setStyleSheet(\"QLabel#label_31{\\n\"\n\"color: white;\\n\"\n\"}\")\n self.label_31.setObjectName(\"label_31\")\n self.lineEdit_7 = QtWidgets.QLineEdit(self.page_2)\n self.lineEdit_7.setGeometry(QtCore.QRect(50, 100, 241, 31))\n self.lineEdit_7.setStyleSheet(\"QLineEdit#lineEdit_7{\\n\"\n\"border-radius:8px;\\n\"\n\"padding-left: 5px;\\n\"\n\"}\")\n self.lineEdit_7.setObjectName(\"lineEdit_7\")\n self.lineEdit_8 = QtWidgets.QLineEdit(self.page_2)\n self.lineEdit_8.setGeometry(QtCore.QRect(50, 160, 241, 31))\n self.lineEdit_8.setStyleSheet(\"QLineEdit#lineEdit_8{\\n\"\n\"border-radius:8px;\\n\"\n\"padding-left: 5px;\\n\"\n\"}\")\n self.lineEdit_8.setObjectName(\"lineEdit_8\")\n self.lineEdit_6 = QtWidgets.QLineEdit(self.page_2)\n self.lineEdit_6.setGeometry(QtCore.QRect(50, 40, 241, 31))\n self.lineEdit_6.setStyleSheet(\"QLineEdit#lineEdit_6{\\n\"\n\"border-radius:8px;\\n\"\n\"padding-left: 5px;\\n\"\n\"}\")\n self.lineEdit_6.setObjectName(\"lineEdit_6\")\n self.savenewpasswordbtn = QtWidgets.QPushButton(self.page_2)\n self.savenewpasswordbtn.setGeometry(QtCore.QRect(250, 260, 161, 31))\n font = QtGui.QFont()\n font.setFamily(\"Segoe WP Semibold\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n self.savenewpasswordbtn.setFont(font)\n self.savenewpasswordbtn.setStyleSheet(\"QPushButton#savenewpasswordbtn{\\n\"\n\" background-color: Transparent;\\n\"\n\" background-repeat:no-repeat;\\n\"\n\" border: 2px solid white;\\n\"\n\" color: white;\\n\"\n\"border-radius:8px;\\n\"\n\" \\n\"\n\"}\\n\"\n\"\\n\"\n\"QPushButton:hover#savenewpasswordbtn{\\n\"\n\" background-color: white;\\n\"\n\" border-color: white;\\n\"\n\" color: black;\\n\"\n\"border-radius: 8px;\\n\"\n\" border: 2px solid white;\\n\"\n\"}\")\n self.savenewpasswordbtn.setObjectName(\"savenewpasswordbtn\")\n self.errorlabel = QtWidgets.QLabel(self.page_2)\n self.errorlabel.setGeometry(QtCore.QRect(80, 210, 441, 21))\n font = QtGui.QFont()\n font.setFamily(\"Segoe UI Semibold\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n self.errorlabel.setFont(font)\n 
self.errorlabel.setStyleSheet(\"QLabel#errorlabel{\\n\"\n\" color: red;\\n\"\n\"}\")\n self.errorlabel.setText(\"\")\n self.errorlabel.setObjectName(\"errorlabel\")\n self.stackedWidget_3.addWidget(self.page_2)\n self.line_8 = QtWidgets.QFrame(self.setting_page)\n self.line_8.setGeometry(QtCore.QRect(250, 90, 20, 31))\n self.line_8.setFrameShape(QtWidgets.QFrame.VLine)\n self.line_8.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.line_8.setObjectName(\"line_8\")\n self.stackedWidget_2.addWidget(self.setting_page)\n self.notification_page = QtWidgets.QWidget()\n self.notification_page.setObjectName(\"notification_page\")\n self.label_7 = QtWidgets.QLabel(self.notification_page)\n self.label_7.setGeometry(QtCore.QRect(10, 0, 171, 31))\n font = QtGui.QFont()\n font.setFamily(\"Segoe UI Semibold\")\n font.setPointSize(15)\n font.setBold(True)\n font.setWeight(75)\n self.label_7.setFont(font)\n self.label_7.setStyleSheet(\"QLabel#label_7{\\n\"\n\"color: white;\\n\"\n\"}\")\n self.label_7.setObjectName(\"label_7\")\n self.listWidget_2 = QtWidgets.QListWidget(self.notification_page)\n self.listWidget_2.setGeometry(QtCore.QRect(10, 30, 921, 781))\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.listWidget_2.sizePolicy().hasHeightForWidth())\n self.listWidget_2.setSizePolicy(sizePolicy)\n self.listWidget_2.setStyleSheet(\"QListWidget#listWidget_2{\\n\"\n\"background-color: Transparent;\\n\"\n\" background-repeat:no-repeat;\\n\"\n\"border: none;\\n\"\n\"}\")\n self.listWidget_2.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)\n self.listWidget_2.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)\n self.listWidget_2.setFlow(QtWidgets.QListView.LeftToRight)\n self.listWidget_2.setProperty(\"isWrapping\", True)\n self.listWidget_2.setObjectName(\"listWidget_2\")\n self.stackedWidget_2.addWidget(self.notification_page)\n self.largeImage = QtWidgets.QWidget()\n self.largeImage.setObjectName(\"largeImage\")\n self.backToNotification = QtWidgets.QPushButton(self.largeImage)\n self.backToNotification.setGeometry(QtCore.QRect(40, 20, 141, 31))\n font = QtGui.QFont()\n font.setFamily(\"Segoe UI Semibold\")\n font.setPointSize(12)\n font.setBold(True)\n font.setWeight(75)\n self.backToNotification.setFont(font)\n self.backToNotification.setStyleSheet(\"QPushButton#backToNotification{\\n\"\n\" background-color: Transparent;\\n\"\n\" background-repeat:no-repeat;\\n\"\n\" border: none;\\n\"\n\"\\n\"\n\"color: white;\\n\"\n\" outline:none;\\n\"\n\" Text-align:left;\\n\"\n\"padding-left: 20px;\\n\"\n\"}\\n\"\n\"\\n\"\n\"QPushButton:hover#backToNotification{\\n\"\n\"color: blue;\\n\"\n\"}\\n\"\n\"\")\n icon8 = QtGui.QIcon()\n icon8.addPixmap(QtGui.QPixmap(\"./images/left-arrow.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.backToNotification.setIcon(icon8)\n self.backToNotification.setIconSize(QtCore.QSize(30, 30))\n self.backToNotification.setObjectName(\"backToNotification\")\n self.label_6 = QtWidgets.QLabel(self.largeImage)\n self.label_6.setGeometry(QtCore.QRect(240, 20, 211, 21))\n font = QtGui.QFont()\n font.setFamily(\"Segoe UI Semibold\")\n font.setPointSize(12)\n font.setBold(True)\n font.setWeight(75)\n self.label_6.setFont(font)\n self.label_6.setStyleSheet(\"QLabel#label_6{\\n\"\n\"color: white;\\n\"\n\"}\")\n self.label_6.setText(\"\")\n self.label_6.setObjectName(\"label_6\")\n self.label_32 = 
QtWidgets.QLabel(self.largeImage)\n self.label_32.setGeometry(QtCore.QRect(460, 20, 211, 21))\n font = QtGui.QFont()\n font.setFamily(\"Segoe UI Semibold\")\n font.setPointSize(12)\n font.setBold(True)\n font.setWeight(75)\n self.label_32.setFont(font)\n self.label_32.setStyleSheet(\"QLabel#label_32{\\n\"\n\"color: white;\\n\"\n\"}\")\n self.label_32.setText(\"\")\n self.label_32.setObjectName(\"label_32\")\n self.label_33 = QtWidgets.QLabel(self.largeImage)\n self.label_33.setGeometry(QtCore.QRect(720, 20, 211, 21))\n font = QtGui.QFont()\n font.setFamily(\"Segoe UI Semibold\")\n font.setPointSize(12)\n font.setBold(True)\n font.setWeight(75)\n self.label_33.setFont(font)\n self.label_33.setStyleSheet(\"QLabel#label_33{\\n\"\n\"color: white;\\n\"\n\"}\")\n self.label_33.setText(\"\")\n self.label_33.setObjectName(\"label_33\")\n self.enlargeImagelabel = QtWidgets.QLabel(self.largeImage)\n self.enlargeImagelabel.setGeometry(QtCore.QRect(80, 90, 831, 691))\n self.enlargeImagelabel.setText(\"\")\n self.enlargeImagelabel.setObjectName(\"enlargeImagelabel\")\n self.stackedWidget_2.addWidget(self.largeImage)\n self.page_6 = QtWidgets.QWidget()\n self.page_6.setObjectName(\"page_6\")\n self.label_39 = QtWidgets.QLabel(self.page_6)\n self.label_39.setGeometry(QtCore.QRect(30, 50, 161, 61))\n font = QtGui.QFont()\n font.setFamily(\"Segoe UI Semibold\")\n font.setPointSize(24)\n font.setBold(True)\n font.setWeight(75)\n self.label_39.setFont(font)\n self.label_39.setStyleSheet(\"QLabel#label_39{\\n\"\n\"color: white;\\n\"\n\"}\")\n self.label_39.setObjectName(\"label_39\")\n self.label_41 = QtWidgets.QLabel(self.page_6)\n self.label_41.setGeometry(QtCore.QRect(60, 150, 261, 21))\n font = QtGui.QFont()\n font.setFamily(\"Segoe UI Semibold\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n self.label_41.setFont(font)\n self.label_41.setStyleSheet(\"QLabel#label_41{\\n\"\n\"color: white;\\n\"\n\"}\")\n self.label_41.setObjectName(\"label_41\")\n self.label_42 = QtWidgets.QLabel(self.page_6)\n self.label_42.setGeometry(QtCore.QRect(90, 190, 531, 41))\n self.label_42.setStyleSheet(\"QLabel#label_42{\\n\"\n\"color: white;\\n\"\n\"}\")\n self.label_42.setObjectName(\"label_42\")\n self.label_43 = QtWidgets.QLabel(self.page_6)\n self.label_43.setGeometry(QtCore.QRect(90, 230, 741, 51))\n self.label_43.setStyleSheet(\"QLabel#label_43{\\n\"\n\"color: white;\\n\"\n\"}\")\n self.label_43.setObjectName(\"label_43\")\n self.label_44 = QtWidgets.QLabel(self.page_6)\n self.label_44.setGeometry(QtCore.QRect(90, 280, 711, 41))\n self.label_44.setStyleSheet(\"QLabel#label_44{\\n\"\n\"color: white;\\n\"\n\"}\")\n self.label_44.setObjectName(\"label_44\")\n self.label_45 = QtWidgets.QLabel(self.page_6)\n self.label_45.setGeometry(QtCore.QRect(90, 330, 721, 41))\n self.label_45.setStyleSheet(\"QLabel#label_45{\\n\"\n\"color: white;\\n\"\n\"}\")\n self.label_45.setObjectName(\"label_45\")\n self.label_46 = QtWidgets.QLabel(self.page_6)\n self.label_46.setGeometry(QtCore.QRect(90, 380, 721, 41))\n self.label_46.setText(\"\")\n self.label_46.setObjectName(\"label_46\")\n self.label_47 = QtWidgets.QLabel(self.page_6)\n self.label_47.setGeometry(QtCore.QRect(60, 390, 221, 21))\n font = QtGui.QFont()\n font.setFamily(\"Segoe UI Semibold\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n self.label_47.setFont(font)\n self.label_47.setStyleSheet(\"QLabel#label_47{\\n\"\n\"color: white;\\n\"\n\"}\")\n self.label_47.setObjectName(\"label_47\")\n self.label_48 = 
QtWidgets.QLabel(self.page_6)\n self.label_48.setGeometry(QtCore.QRect(90, 520, 701, 31))\n self.label_48.setStyleSheet(\"QLabel#label_48{\\n\"\n\"color: white;\\n\"\n\"}\")\n self.label_48.setObjectName(\"label_48\")\n self.label_49 = QtWidgets.QLabel(self.page_6)\n self.label_49.setGeometry(QtCore.QRect(90, 470, 711, 31))\n self.label_49.setStyleSheet(\"QLabel#label_49{\\n\"\n\"color: white;\\n\"\n\"}\")\n self.label_49.setObjectName(\"label_49\")\n self.label_50 = QtWidgets.QLabel(self.page_6)\n self.label_50.setGeometry(QtCore.QRect(90, 420, 711, 41))\n self.label_50.setStyleSheet(\"QLabel#label_50{\\n\"\n\"color: white;\\n\"\n\"}\")\n self.label_50.setObjectName(\"label_50\")\n self.label_51 = QtWidgets.QLabel(self.page_6)\n self.label_51.setGeometry(QtCore.QRect(90, 560, 721, 41))\n self.label_51.setStyleSheet(\"QLabel#label_51{\\n\"\n\"color: white;\\n\"\n\"}\")\n self.label_51.setObjectName(\"label_51\")\n self.label_52 = QtWidgets.QLabel(self.page_6)\n self.label_52.setGeometry(QtCore.QRect(90, 610, 721, 41))\n self.label_52.setStyleSheet(\"QLabel#label_52{\\n\"\n\"color: white;\\n\"\n\"}\")\n self.label_52.setObjectName(\"label_52\")\n self.label_53 = QtWidgets.QLabel(self.page_6)\n self.label_53.setGeometry(QtCore.QRect(90, 660, 721, 41))\n self.label_53.setStyleSheet(\"QLabel#label_53{\\n\"\n\"color: white;\\n\"\n\"}\")\n self.label_53.setObjectName(\"label_53\")\n self.label_54 = QtWidgets.QLabel(self.page_6)\n self.label_54.setGeometry(QtCore.QRect(90, 710, 721, 41))\n self.label_54.setStyleSheet(\"QLabel#label_54{\\n\"\n\"color: white;\\n\"\n\"}\")\n self.label_54.setObjectName(\"label_54\")\n self.stackedWidget_2.addWidget(self.page_6)\n self.detect_image_page = QtWidgets.QWidget()\n self.detect_image_page.setStyleSheet(\"\")\n self.detect_image_page.setObjectName(\"detect_image_page\")\n self.openFilebtn = QtWidgets.QPushButton(self.detect_image_page)\n self.openFilebtn.setGeometry(QtCore.QRect(230, 60, 82, 25))\n font = QtGui.QFont()\n font.setFamily(\"Segoe WP Semibold\")\n font.setPointSize(9)\n font.setBold(True)\n font.setWeight(75)\n self.openFilebtn.setFont(font)\n self.openFilebtn.setStyleSheet(\"QPushButton#openFilebtn{\\n\"\n\" background-color: Transparent;\\n\"\n\" background-repeat:no-repeat;\\n\"\n\" border: 2px solid white;\\n\"\n\" color: white;\\n\"\n\"border-radius:8px;\\n\"\n\"\\n\"\n\"}\\n\"\n\"QPushButton:hover#openFilebtn{\\n\"\n\" background-color: white;\\n\"\n\" border-color: white;\\n\"\n\" color: black;\\n\"\n\" border: 2px solid white;\\n\"\n\"border-radius: 8px;\\n\"\n\"}\")\n self.openFilebtn.setObjectName(\"openFilebtn\")\n self.imagedetectbtn = QtWidgets.QPushButton(self.detect_image_page)\n self.imagedetectbtn.setGeometry(QtCore.QRect(330, 710, 251, 31))\n font = QtGui.QFont()\n font.setFamily(\"Segoe WP Semibold\")\n font.setPointSize(12)\n font.setBold(True)\n font.setWeight(75)\n self.imagedetectbtn.setFont(font)\n self.imagedetectbtn.setStyleSheet(\"QPushButton#imagedetectbtn{\\n\"\n\" background-color: Transparent;\\n\"\n\" background-repeat:no-repeat;\\n\"\n\" border: 2px solid white;\\n\"\n\" color: white;\\n\"\n\"border-radius:8px;\\n\"\n\"\\n\"\n\"}\\n\"\n\"QPushButton:hover#imagedetectbtn{\\n\"\n\" background-color: white;\\n\"\n\" border-color: white;\\n\"\n\" color: black;\\n\"\n\"border-radius: 8px;\\n\"\n\" border: 2px solid white;\\n\"\n\"}\")\n self.imagedetectbtn.setObjectName(\"imagedetectbtn\")\n self.label_11 = QtWidgets.QLabel(self.detect_image_page)\n self.label_11.setGeometry(QtCore.QRect(130, 130, 641, 551))\n font = 
QtGui.QFont()\n font.setFamily(\"Segoe WP Semibold\")\n font.setPointSize(15)\n font.setBold(True)\n font.setWeight(75)\n self.label_11.setFont(font)\n self.label_11.setStyleSheet(\"QLabel#label_11{\\n\"\n\"color: white;\\n\"\n\"}\")\n self.label_11.setTextFormat(QtCore.Qt.AutoText)\n self.label_11.setAlignment(QtCore.Qt.AlignCenter)\n self.label_11.setObjectName(\"label_11\")\n self.label_12 = QtWidgets.QLabel(self.detect_image_page)\n self.label_12.setGeometry(QtCore.QRect(120, 0, 681, 41))\n font = QtGui.QFont()\n font.setFamily(\"Segoe UI Semibold\")\n font.setPointSize(11)\n font.setBold(True)\n font.setWeight(75)\n self.label_12.setFont(font)\n self.label_12.setStyleSheet(\"QLabel#label_12{\\n\"\n\"color: white;\\n\"\n\"}\")\n self.label_12.setFrameShape(QtWidgets.QFrame.NoFrame)\n self.label_12.setObjectName(\"label_12\")\n self.setPathbtn = QtWidgets.QPushButton(self.detect_image_page)\n self.setPathbtn.setGeometry(QtCore.QRect(230, 90, 82, 25))\n font = QtGui.QFont()\n font.setFamily(\"Segoe UI Semibold\")\n font.setPointSize(9)\n font.setBold(True)\n font.setWeight(75)\n self.setPathbtn.setFont(font)\n self.setPathbtn.setStyleSheet(\"QPushButton#setPathbtn{\\n\"\n\" background-color: Transparent;\\n\"\n\" background-repeat:no-repeat;\\n\"\n\" border: 2px solid white;\\n\"\n\" color: white;\\n\"\n\"border-radius:8px;\\n\"\n\"\\n\"\n\"}\\n\"\n\"QPushButton:hover#setPathbtn{\\n\"\n\" background-color: white;\\n\"\n\" border-color: white;\\n\"\n\" color: black;\\n\"\n\"border-radius: 8px;\\n\"\n\" border: 2px solid white;\\n\"\n\"}\")\n self.setPathbtn.setObjectName(\"setPathbtn\")\n self.fileLabel = QtWidgets.QLabel(self.detect_image_page)\n self.fileLabel.setGeometry(QtCore.QRect(320, 62, 341, 21))\n font = QtGui.QFont()\n font.setFamily(\"Segoe WP Semibold\")\n font.setPointSize(9)\n font.setBold(True)\n font.setWeight(75)\n self.fileLabel.setFont(font)\n self.fileLabel.setStyleSheet(\"QLabel#fileLabel{\\n\"\n\"color: white;\\n\"\n\"border-radius:4px;\\n\"\n\" border: 2px solid white;\\n\"\n\"}\")\n self.fileLabel.setObjectName(\"fileLabel\")\n self.pathLabel = QtWidgets.QLabel(self.detect_image_page)\n self.pathLabel.setGeometry(QtCore.QRect(320, 92, 341, 21))\n font = QtGui.QFont()\n font.setFamily(\"Segoe UI Semibold\")\n font.setPointSize(9)\n font.setBold(True)\n font.setWeight(75)\n self.pathLabel.setFont(font)\n self.pathLabel.setStyleSheet(\"QLabel#pathLabel{\\n\"\n\"color: white;\\n\"\n\" border: 2px solid white;\\n\"\n\"border-radius:4px;\\n\"\n\"}\")\n self.pathLabel.setObjectName(\"pathLabel\")\n self.saveNotificationsImage = QtWidgets.QCheckBox(self.detect_image_page)\n self.saveNotificationsImage.setGeometry(QtCore.QRect(370, 680, 171, 21))\n font = QtGui.QFont()\n font.setFamily(\"Segoe WP Semibold\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n self.saveNotificationsImage.setFont(font)\n self.saveNotificationsImage.setStyleSheet(\"QCheckBox{\\n\"\n\"color: white;\\n\"\n\"}\")\n self.saveNotificationsImage.setObjectName(\"saveNotificationsImage\")\n self.label_21 = QtWidgets.QLabel(self.detect_image_page)\n self.label_21.setGeometry(QtCore.QRect(302, 750, 301, 31))\n font = QtGui.QFont()\n font.setFamily(\"Segoe UI Semibold\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n self.label_21.setFont(font)\n self.label_21.setStyleSheet(\"QLabel#label_21{\\n\"\n\"color: white;\\n\"\n\"}\")\n self.label_21.setText(\"\")\n self.label_21.setAlignment(QtCore.Qt.AlignCenter)\n self.label_21.setObjectName(\"label_21\")\n 
self.stackedWidget_2.addWidget(self.detect_image_page)\n self.detect_video_page = QtWidgets.QWidget()\n self.detect_video_page.setObjectName(\"detect_video_page\")\n self.saveNotificationsImage_3 = QtWidgets.QCheckBox(self.detect_video_page)\n self.saveNotificationsImage_3.setGeometry(QtCore.QRect(330, 470, 241, 21))\n font = QtGui.QFont()\n font.setFamily(\"Segoe WP Semibold\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n self.saveNotificationsImage_3.setFont(font)\n self.saveNotificationsImage_3.setStyleSheet(\"QCheckBox{\\n\"\n\"color: white;\\n\"\n\"}\")\n self.saveNotificationsImage_3.setObjectName(\"saveNotificationsImage_3\")\n self.detectCamera_2 = QtWidgets.QPushButton(self.detect_video_page)\n self.detectCamera_2.setGeometry(QtCore.QRect(320, 510, 251, 31))\n font = QtGui.QFont()\n font.setFamily(\"Segoe WP Semibold\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n self.detectCamera_2.setFont(font)\n self.detectCamera_2.setStyleSheet(\"QPushButton#detectCamera_2{\\n\"\n\" background-color: Transparent;\\n\"\n\" background-repeat:no-repeat;\\n\"\n\" border: 2px solid white;\\n\"\n\" color: white;\\n\"\n\"border-radius:8px;\\n\"\n\"\\n\"\n\"}\\n\"\n\"QPushButton:hover#detectCamera_2{\\n\"\n\" background-color: white;\\n\"\n\" border-color: white;\\n\"\n\" color: black;\\n\"\n\"border-radius: 8px;\\n\"\n\" border: 2px solid white;\\n\"\n\"}\")\n self.detectCamera_2.setObjectName(\"detectCamera_2\")\n self.label_20 = QtWidgets.QLabel(self.detect_video_page)\n self.label_20.setGeometry(QtCore.QRect(20, 120, 911, 121))\n font = QtGui.QFont()\n font.setFamily(\"Segoe UI Semibold\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n self.label_20.setFont(font)\n self.label_20.setStyleSheet(\"QLabel#label_20{\\n\"\n\"color: white;\\n\"\n\"}\")\n self.label_20.setAlignment(QtCore.Qt.AlignCenter)\n self.label_20.setObjectName(\"label_20\")\n self.errorvideo = QtWidgets.QLabel(self.detect_video_page)\n self.errorvideo.setGeometry(QtCore.QRect(290, 560, 311, 31))\n font = QtGui.QFont()\n font.setFamily(\"Segoe UI Semibold\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n self.errorvideo.setFont(font)\n self.errorvideo.setStyleSheet(\"QLabel#errorvideo{\\n\"\n\"color: white;\\n\"\n\"}\")\n self.errorvideo.setText(\"\")\n self.errorvideo.setAlignment(QtCore.Qt.AlignCenter)\n self.errorvideo.setObjectName(\"errorvideo\")\n self.fileLabel_2 = QtWidgets.QLabel(self.detect_video_page)\n self.fileLabel_2.setGeometry(QtCore.QRect(350, 332, 341, 21))\n font = QtGui.QFont()\n font.setFamily(\"Segoe WP Semibold\")\n font.setPointSize(9)\n font.setBold(True)\n font.setWeight(75)\n self.fileLabel_2.setFont(font)\n self.fileLabel_2.setStyleSheet(\"QLabel#fileLabel_2{\\n\"\n\"color: white;\\n\"\n\"border-radius:4px;\\n\"\n\" border: 2px solid white;\\n\"\n\"}\")\n self.fileLabel_2.setObjectName(\"fileLabel_2\")\n self.openFilebtn_2 = QtWidgets.QPushButton(self.detect_video_page)\n self.openFilebtn_2.setGeometry(QtCore.QRect(260, 330, 82, 25))\n font = QtGui.QFont()\n font.setFamily(\"Segoe WP Semibold\")\n font.setPointSize(9)\n font.setBold(True)\n font.setWeight(75)\n self.openFilebtn_2.setFont(font)\n self.openFilebtn_2.setStyleSheet(\"QPushButton#openFilebtn_2{\\n\"\n\" background-color: Transparent;\\n\"\n\" background-repeat:no-repeat;\\n\"\n\" border: 2px solid white;\\n\"\n\" color: white;\\n\"\n\"border-radius:8px;\\n\"\n\"\\n\"\n\"}\\n\"\n\"QPushButton:hover#openFilebtn_2{\\n\"\n\" background-color: white;\\n\"\n\" 
border-color: white;\\n\"\n\" color: black;\\n\"\n\" border: 2px solid white;\\n\"\n\"border-radius: 8px;\\n\"\n\"}\")\n self.openFilebtn_2.setObjectName(\"openFilebtn_2\")\n self.stackedWidget_2.addWidget(self.detect_video_page)\n self.detect_webcam_page = QtWidgets.QWidget()\n self.detect_webcam_page.setObjectName(\"detect_webcam_page\")\n self.label_10 = QtWidgets.QLabel(self.detect_webcam_page)\n self.label_10.setGeometry(QtCore.QRect(40, 40, 911, 121))\n font = QtGui.QFont()\n font.setFamily(\"Segoe UI Semibold\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n self.label_10.setFont(font)\n self.label_10.setStyleSheet(\"QLabel#label_10{\\n\"\n\"color: white;\\n\"\n\"}\")\n self.label_10.setObjectName(\"label_10\")\n self.label_13 = QtWidgets.QLabel(self.detect_webcam_page)\n self.label_13.setGeometry(QtCore.QRect(340, 220, 91, 21))\n font = QtGui.QFont()\n font.setFamily(\"Segoe WP Semibold\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n self.label_13.setFont(font)\n self.label_13.setStyleSheet(\"QLabel#label_13{\\n\"\n\"color:white;\\n\"\n\"}\")\n self.label_13.setObjectName(\"label_13\")\n self.label_14 = QtWidgets.QLabel(self.detect_webcam_page)\n self.label_14.setGeometry(QtCore.QRect(340, 300, 81, 31))\n font = QtGui.QFont()\n font.setFamily(\"Segoe WP Semibold\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n self.label_14.setFont(font)\n self.label_14.setStyleSheet(\"QLabel#label_14{\\n\"\n\"color:white;\\n\"\n\"}\")\n self.label_14.setObjectName(\"label_14\")\n self.detectCamera = QtWidgets.QPushButton(self.detect_webcam_page)\n self.detectCamera.setGeometry(QtCore.QRect(350, 410, 251, 31))\n font = QtGui.QFont()\n font.setFamily(\"Segoe WP Semibold\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n self.detectCamera.setFont(font)\n self.detectCamera.setStyleSheet(\"QPushButton#detectCamera{\\n\"\n\" background-color: Transparent;\\n\"\n\" background-repeat:no-repeat;\\n\"\n\" border: 2px solid white;\\n\"\n\" color: white;\\n\"\n\"border-radius:8px;\\n\"\n\"\\n\"\n\"}\\n\"\n\"QPushButton:hover#detectCamera{\\n\"\n\" background-color: white;\\n\"\n\" border-color: white;\\n\"\n\" color: black;\\n\"\n\"border-radius: 8px;\\n\"\n\" border: 2px solid white;\\n\"\n\"}\")\n self.detectCamera.setObjectName(\"detectCamera\")\n self.cameraerror = QtWidgets.QLabel(self.detect_webcam_page)\n self.cameraerror.setGeometry(QtCore.QRect(300, 380, 361, 21))\n font = QtGui.QFont()\n font.setFamily(\"Segoe UI Semibold\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n self.cameraerror.setFont(font)\n self.cameraerror.setStyleSheet(\"QLabel#cameraerror{\\n\"\n\"color:red;\\n\"\n\"}\")\n self.cameraerror.setText(\"\")\n self.cameraerror.setAlignment(QtCore.Qt.AlignCenter)\n self.cameraerror.setObjectName(\"cameraerror\")\n self.cameraIP = QtWidgets.QLineEdit(self.detect_webcam_page)\n self.cameraIP.setGeometry(QtCore.QRect(340, 250, 271, 31))\n self.cameraIP.setStyleSheet(\"QLineEdit#cameraIP\\n\"\n\"{\\n\"\n\"border-radius:8px;\\n\"\n\"padding-left: 5px;\\n\"\n\"}\")\n self.cameraIP.setObjectName(\"cameraIP\")\n self.locationCamera = QtWidgets.QLineEdit(self.detect_webcam_page)\n self.locationCamera.setGeometry(QtCore.QRect(340, 340, 271, 31))\n self.locationCamera.setStyleSheet(\"QLineEdit#locationCamera\\n\"\n\"{\\n\"\n\"border-radius:8px;\\n\"\n\"padding-left: 5px;\\n\"\n\"}\")\n self.locationCamera.setObjectName(\"locationCamera\")\n self.saveNotificationsImage_2 = 
QtWidgets.QCheckBox(self.detect_webcam_page)\n self.saveNotificationsImage_2.setGeometry(QtCore.QRect(350, 160, 241, 21))\n font = QtGui.QFont()\n font.setFamily(\"Segoe WP Semibold\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n self.saveNotificationsImage_2.setFont(font)\n self.saveNotificationsImage_2.setStyleSheet(\"QCheckBox{\\n\"\n\"color: white;\\n\"\n\"}\")\n self.saveNotificationsImage_2.setObjectName(\"saveNotificationsImage_2\")\n self.label_9 = QtWidgets.QLabel(self.detect_webcam_page)\n self.label_9.setGeometry(QtCore.QRect(350, 460, 241, 31))\n font = QtGui.QFont()\n font.setFamily(\"Segoe UI Semibold\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n self.label_9.setFont(font)\n self.label_9.setStyleSheet(\"QLabel#label_9{\\n\"\n\"color: white;\\n\"\n\"}\")\n self.label_9.setText(\"\")\n self.label_9.setAlignment(QtCore.Qt.AlignCenter)\n self.label_9.setObjectName(\"label_9\")\n self.stackedWidget_2.addWidget(self.detect_webcam_page)\n self.stackedWidget.addWidget(self.first_page)\n self.login_page = QtWidgets.QWidget()\n self.login_page.setStyleSheet(\"QWidget#login_page{\\n\"\n\"background-color: #464646;\\n\"\n\"}\")\n self.login_page.setObjectName(\"login_page\")\n self.groupBox = QtWidgets.QGroupBox(self.login_page)\n self.groupBox.setGeometry(QtCore.QRect(190, 170, 861, 481))\n font = QtGui.QFont()\n font.setFamily(\"Modern\")\n self.groupBox.setFont(font)\n self.groupBox.setStyleSheet(\"QGroupBox{\\n\"\n\"background: #FFFFFF;\\n\"\n\"box-shadow: 0 0 20px 0 rgba(0, 0, 0, 0.2), 0 5px 5px 0 rgba(0, 0, 0, 0.24);\\n\"\n\" border-radius: 10px;\\n\"\n\"}\")\n self.groupBox.setTitle(\"\")\n self.groupBox.setAlignment(QtCore.Qt.AlignCenter)\n self.groupBox.setObjectName(\"groupBox\")\n self.LoginButton = QtWidgets.QPushButton(self.groupBox)\n self.LoginButton.setGeometry(QtCore.QRect(510, 270, 221, 31))\n font = QtGui.QFont()\n font.setFamily(\"Segoe WP Semibold\")\n font.setBold(True)\n font.setWeight(75)\n self.LoginButton.setFont(font)\n self.LoginButton.setStyleSheet(\"QPushButton#LoginButton {\\n\"\n\" background-color: #4CAF50;\\n\"\n\" color: white;\\n\"\n\" border-radius: 10px;\\n\"\n\"}\\n\"\n\"\\n\"\n\"QPushButton:hover#LoginButton {\\n\"\n\" background-color: #60ba64;\\n\"\n\"}\")\n self.LoginButton.setObjectName(\"LoginButton\")\n self.label_2 = QtWidgets.QLabel(self.groupBox)\n self.label_2.setGeometry(QtCore.QRect(510, 235, 31, 21))\n self.label_2.setStyleSheet(\"image: url(./images/padlock.png);\")\n self.label_2.setText(\"\")\n self.label_2.setPixmap(QtGui.QPixmap(\"./password/padlock.png\"))\n self.label_2.setObjectName(\"label_2\")\n self.label_3 = QtWidgets.QLabel(self.groupBox)\n self.label_3.setGeometry(QtCore.QRect(510, 195, 31, 21))\n self.label_3.setStyleSheet(\"QLabel{\\n\"\n\"image: url(./images/boy.png);\\n\"\n\"}\")\n self.label_3.setText(\"\")\n self.label_3.setPixmap(QtGui.QPixmap(\"./images/boy.png\"))\n self.label_3.setObjectName(\"label_3\")\n self.label_4 = QtWidgets.QLabel(self.groupBox)\n self.label_4.setGeometry(QtCore.QRect(510, 130, 221, 51))\n font = QtGui.QFont()\n font.setFamily(\"Segoe WP Semibold\")\n font.setPointSize(17)\n font.setBold(True)\n font.setWeight(75)\n self.label_4.setFont(font)\n self.label_4.setAlignment(QtCore.Qt.AlignCenter)\n self.label_4.setObjectName(\"label_4\")\n self.lineEdit = QtWidgets.QLineEdit(self.groupBox)\n self.lineEdit.setGeometry(QtCore.QRect(510, 190, 221, 31))\n self.lineEdit.setStyleSheet(\"QLineEdit {\\n\"\n\" border: 2px solid gray;\\n\"\n\" 
border-radius: 10px;\\n\"\n\"padding-left: 25px;\\n\"\n\"}\")\n self.lineEdit.setObjectName(\"lineEdit\")\n self.lineEdit_2 = QtWidgets.QLineEdit(self.groupBox)\n self.lineEdit_2.setGeometry(QtCore.QRect(510, 230, 221, 31))\n self.lineEdit_2.setStyleSheet(\"QLineEdit {\\n\"\n\" border: 2px solid gray;\\n\"\n\" border-radius: 10px;\\n\"\n\" padding-left: 25px;\\n\"\n\"}\\n\"\n\"\")\n self.lineEdit_2.setMaxLength(16)\n self.lineEdit_2.setEchoMode(QtWidgets.QLineEdit.Password)\n self.lineEdit_2.setObjectName(\"lineEdit_2\")\n self.label_17 = QtWidgets.QLabel(self.groupBox)\n self.label_17.setGeometry(QtCore.QRect(510, 320, 221, 21))\n font = QtGui.QFont()\n font.setFamily(\"Segoe WP Semibold\")\n font.setBold(True)\n font.setWeight(75)\n self.label_17.setFont(font)\n self.label_17.setStyleSheet(\"QLabel#label_17{\\n\"\n\" color: red;\\n\"\n\"}\")\n self.label_17.setText(\"\")\n self.label_17.setAlignment(QtCore.Qt.AlignCenter)\n self.label_17.setObjectName(\"label_17\")\n self.label_18 = QtWidgets.QLabel(self.groupBox)\n self.label_18.setGeometry(QtCore.QRect(90, 90, 261, 341))\n self.label_18.setText(\"\")\n self.label_18.setPixmap(QtGui.QPixmap(\"./images/detection.png\"))\n self.label_18.setObjectName(\"label_18\")\n self.label = QtWidgets.QLabel(self.groupBox)\n self.label.setGeometry(QtCore.QRect(110, 50, 221, 31))\n font = QtGui.QFont()\n font.setFamily(\"Segoe WP Semibold\")\n font.setPointSize(14)\n font.setBold(True)\n font.setWeight(75)\n self.label.setFont(font)\n self.label.setAlignment(QtCore.Qt.AlignCenter)\n self.label.setObjectName(\"label\")\n self.line = QtWidgets.QFrame(self.groupBox)\n self.line.setGeometry(QtCore.QRect(400, 40, 20, 391))\n self.line.setFrameShape(QtWidgets.QFrame.VLine)\n self.line.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.line.setObjectName(\"line\")\n self.signbtn = QtWidgets.QPushButton(self.groupBox)\n self.signbtn.setGeometry(QtCore.QRect(510, 440, 221, 31))\n font = QtGui.QFont()\n font.setPointSize(10)\n font.setUnderline(True)\n self.signbtn.setFont(font)\n self.signbtn.setStyleSheet(\"QPushButton#signbtn{\\n\"\n\"background-color: Transparent;background-repeat:no-repeat;\\n\"\n\"color: #A0A0A0;\\n\"\n\"\\n\"\n\"}\\n\"\n\"QPushButton:hover#signbtn{\\n\"\n\" color: blue;\\n\"\n\"}\")\n self.signbtn.setObjectName(\"signbtn\")\n self.LoginButton.raise_()\n self.label_4.raise_()\n self.lineEdit.raise_()\n self.label_17.raise_()\n self.lineEdit_2.raise_()\n self.label_2.raise_()\n self.label_18.raise_()\n self.label.raise_()\n self.label_3.raise_()\n self.line.raise_()\n self.signbtn.raise_()\n self.stackedWidget.addWidget(self.login_page)\n self.page_3 = QtWidgets.QWidget()\n self.page_3.setStyleSheet(\"QWidget#page_3{\\n\"\n\"background-color: #464646;\\n\"\n\"}\")\n self.page_3.setObjectName(\"page_3\")\n self.groupBox_2 = QtWidgets.QGroupBox(self.page_3)\n self.groupBox_2.setGeometry(QtCore.QRect(200, 170, 861, 481))\n font = QtGui.QFont()\n font.setFamily(\"Modern\")\n self.groupBox_2.setFont(font)\n self.groupBox_2.setStyleSheet(\"QGroupBox{\\n\"\n\"background: #FFFFFF;\\n\"\n\"box-shadow: 0 0 20px 0 rgba(0, 0, 0, 0.2), 0 5px 5px 0 rgba(0, 0, 0, 0.24);\\n\"\n\" border-radius: 10px;\\n\"\n\"}\")\n self.groupBox_2.setTitle(\"\")\n self.groupBox_2.setAlignment(QtCore.Qt.AlignCenter)\n self.groupBox_2.setObjectName(\"groupBox_2\")\n self.createAccountbtn = QtWidgets.QPushButton(self.groupBox_2)\n self.createAccountbtn.setGeometry(QtCore.QRect(580, 430, 251, 31))\n font = QtGui.QFont()\n font.setFamily(\"Segoe WP Semibold\")\n 
font.setBold(True)\n font.setWeight(75)\n self.createAccountbtn.setFont(font)\n self.createAccountbtn.setStyleSheet(\"QPushButton#createAccountbtn {\\n\"\n\" background-color: #4CAF50;\\n\"\n\" color: white;\\n\"\n\" border-radius: 10px;\\n\"\n\"}\\n\"\n\"\\n\"\n\"QPushButton:hover#createAccountbtn {\\n\"\n\" background-color: #60ba64;\\n\"\n\"}\")\n self.createAccountbtn.setObjectName(\"createAccountbtn\")\n self.label_8 = QtWidgets.QLabel(self.groupBox_2)\n self.label_8.setGeometry(QtCore.QRect(320, 320, 31, 21))\n self.label_8.setStyleSheet(\"image: url(./images/padlock.png);\")\n self.label_8.setText(\"\")\n self.label_8.setPixmap(QtGui.QPixmap(\"./password/padlock.png\"))\n self.label_8.setObjectName(\"label_8\")\n self.label_15 = QtWidgets.QLabel(self.groupBox_2)\n self.label_15.setGeometry(QtCore.QRect(320, 280, 31, 21))\n self.label_15.setStyleSheet(\"QLabel{\\n\"\n\"image: url(./images/boy.png);\\n\"\n\"}\")\n self.label_15.setText(\"\")\n self.label_15.setPixmap(QtGui.QPixmap(\"./images/boy.png\"))\n self.label_15.setObjectName(\"label_15\")\n self.label_19 = QtWidgets.QLabel(self.groupBox_2)\n self.label_19.setGeometry(QtCore.QRect(300, 60, 131, 51))\n font = QtGui.QFont()\n font.setFamily(\"Segoe WP Semibold\")\n font.setPointSize(14)\n font.setBold(True)\n font.setWeight(75)\n self.label_19.setFont(font)\n self.label_19.setAlignment(QtCore.Qt.AlignCenter)\n self.label_19.setObjectName(\"label_19\")\n self.usernamelabel = QtWidgets.QLineEdit(self.groupBox_2)\n self.usernamelabel.setGeometry(QtCore.QRect(320, 276, 221, 31))\n self.usernamelabel.setStyleSheet(\"QLineEdit {\\n\"\n\" border: 2px solid gray;\\n\"\n\" border-radius: 10px;\\n\"\n\"padding-left: 25px;\\n\"\n\"}\")\n self.usernamelabel.setObjectName(\"usernamelabel\")\n self.passwordlabel = QtWidgets.QLineEdit(self.groupBox_2)\n self.passwordlabel.setGeometry(QtCore.QRect(320, 316, 221, 31))\n self.passwordlabel.setStyleSheet(\"QLineEdit {\\n\"\n\" border: 2px solid gray;\\n\"\n\" border-radius: 10px;\\n\"\n\" padding-left: 25px;\\n\"\n\"}\\n\"\n\"\")\n self.passwordlabel.setMaxLength(16)\n self.passwordlabel.setEchoMode(QtWidgets.QLineEdit.Password)\n self.passwordlabel.setObjectName(\"passwordlabel\")\n self.label_23 = QtWidgets.QLabel(self.groupBox_2)\n self.label_23.setGeometry(QtCore.QRect(30, 100, 261, 351))\n self.label_23.setText(\"\")\n self.label_23.setPixmap(QtGui.QPixmap(\"./images/detection.png\"))\n self.label_23.setObjectName(\"label_23\")\n self.label_24 = QtWidgets.QLabel(self.groupBox_2)\n self.label_24.setGeometry(QtCore.QRect(30, 50, 211, 31))\n font = QtGui.QFont()\n font.setFamily(\"Segoe WP Semibold\")\n font.setPointSize(14)\n font.setBold(True)\n font.setWeight(75)\n self.label_24.setFont(font)\n self.label_24.setAlignment(QtCore.Qt.AlignCenter)\n self.label_24.setObjectName(\"label_24\")\n self.line_7 = QtWidgets.QFrame(self.groupBox_2)\n self.line_7.setGeometry(QtCore.QRect(280, 50, 20, 391))\n self.line_7.setFrameShape(QtWidgets.QFrame.VLine)\n self.line_7.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.line_7.setObjectName(\"line_7\")\n self.passwordlabel2 = QtWidgets.QLineEdit(self.groupBox_2)\n self.passwordlabel2.setGeometry(QtCore.QRect(320, 356, 221, 31))\n self.passwordlabel2.setStyleSheet(\"QLineEdit {\\n\"\n\" border: 2px solid gray;\\n\"\n\" border-radius: 10px;\\n\"\n\" padding-left: 25px;\\n\"\n\"}\\n\"\n\"\")\n self.passwordlabel2.setMaxLength(16)\n self.passwordlabel2.setEchoMode(QtWidgets.QLineEdit.Password)\n self.passwordlabel2.setObjectName(\"passwordlabel2\")\n 
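# the signup form text inputs are created below; the password fields above use QLineEdit.Password echo mode so typed characters are masked\n        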
self.firstnamelabel = QtWidgets.QLineEdit(self.groupBox_2)\n self.firstnamelabel.setGeometry(QtCore.QRect(320, 110, 221, 31))\n self.firstnamelabel.setStyleSheet(\"QLineEdit {\\n\"\n\" border: 2px solid gray;\\n\"\n\" border-radius: 10px;\\n\"\n\" padding-left: 25px;\\n\"\n\"}\\n\"\n\"\")\n self.firstnamelabel.setMaxLength(16)\n self.firstnamelabel.setEchoMode(QtWidgets.QLineEdit.Normal)\n self.firstnamelabel.setObjectName(\"firstnamelabel\")\n self.surnamelabel = QtWidgets.QLineEdit(self.groupBox_2)\n self.surnamelabel.setGeometry(QtCore.QRect(320, 150, 221, 31))\n self.surnamelabel.setStyleSheet(\"QLineEdit {\\n\"\n\" border: 2px solid gray;\\n\"\n\" border-radius: 10px;\\n\"\n\" padding-left: 25px;\\n\"\n\"}\\n\"\n\"\")\n self.surnamelabel.setMaxLength(16)\n self.surnamelabel.setEchoMode(QtWidgets.QLineEdit.Normal)\n self.surnamelabel.setObjectName(\"surnamelabel\")\n self.emaillabel = QtWidgets.QLineEdit(self.groupBox_2)\n self.emaillabel.setGeometry(QtCore.QRect(320, 190, 221, 31))\n self.emaillabel.setStyleSheet(\"QLineEdit {\\n\"\n\" border: 2px solid gray;\\n\"\n\" border-radius: 10px;\\n\"\n\" padding-left: 25px;\\n\"\n\"}\\n\"\n\"\")\n self.emaillabel.setMaxLength(30)\n self.emaillabel.setEchoMode(QtWidgets.QLineEdit.Normal)\n self.emaillabel.setObjectName(\"emaillabel\")\n self.label_25 = QtWidgets.QLabel(self.groupBox_2)\n self.label_25.setGeometry(QtCore.QRect(320, 360, 31, 21))\n self.label_25.setStyleSheet(\"image: url(./images/padlock.png);\")\n self.label_25.setText(\"\")\n self.label_25.setPixmap(QtGui.QPixmap(\"./password/padlock.png\"))\n self.label_25.setObjectName(\"label_25\")\n self.label_26 = QtWidgets.QLabel(self.groupBox_2)\n self.label_26.setGeometry(QtCore.QRect(290, 230, 161, 51))\n font = QtGui.QFont()\n font.setFamily(\"Segoe WP Semibold\")\n font.setPointSize(14)\n font.setBold(True)\n font.setWeight(75)\n self.label_26.setFont(font)\n self.label_26.setAlignment(QtCore.Qt.AlignCenter)\n self.label_26.setObjectName(\"label_26\")\n self.label_27 = QtWidgets.QLabel(self.groupBox_2)\n self.label_27.setGeometry(QtCore.QRect(470, 10, 181, 41))\n font = QtGui.QFont()\n font.setFamily(\"Segoe WP Semibold\")\n font.setPointSize(14)\n font.setBold(True)\n font.setWeight(75)\n self.label_27.setFont(font)\n self.label_27.setAlignment(QtCore.Qt.AlignCenter)\n self.label_27.setObjectName(\"label_27\")\n self.passwordBtn = QtWidgets.QPushButton(self.groupBox_2)\n self.passwordBtn.setGeometry(QtCore.QRect(590, 224, 271, 31))\n font = QtGui.QFont()\n font.setFamily(\"Segoe WP Semibold\")\n font.setPointSize(9)\n font.setBold(True)\n font.setWeight(75)\n self.passwordBtn.setFont(font)\n self.passwordBtn.setStyleSheet(\"QPushButton#passwordBtn\\n\"\n\"{\\n\"\n\"background-color: Transparent;background-repeat:no-repeat;\\n\"\n\"\\n\"\n\"Text-align:left;\\n\"\n\"}\\n\"\n\"\")\n self.passwordBtn.setObjectName(\"passwordBtn\")\n self.usernameBtn = QtWidgets.QPushButton(self.groupBox_2)\n self.usernameBtn.setGeometry(QtCore.QRect(590, 184, 271, 31))\n font = QtGui.QFont()\n font.setFamily(\"Segoe UI Semibold\")\n font.setPointSize(9)\n font.setBold(True)\n font.setWeight(75)\n self.usernameBtn.setFont(font)\n self.usernameBtn.setStyleSheet(\"QPushButton#usernameBtn\\n\"\n\"{\\n\"\n\"background-color: Transparent;background-repeat:no-repeat;\\n\"\n\"\\n\"\n\"Text-align:left;\\n\"\n\"}\\n\"\n\"\")\n self.usernameBtn.setObjectName(\"usernameBtn\")\n self.emailBtn = QtWidgets.QPushButton(self.groupBox_2)\n self.emailBtn.setGeometry(QtCore.QRect(590, 144, 271, 31))\n font = 
QtGui.QFont()\n font.setFamily(\"Segoe UI Semibold\")\n font.setPointSize(9)\n font.setBold(True)\n font.setWeight(75)\n self.emailBtn.setFont(font)\n self.emailBtn.setStyleSheet(\"QPushButton#emailBtn{\\n\"\n\"background-color: Transparent;background-repeat:no-repeat;\\n\"\n\"\\n\"\n\"Text-align:left;\\n\"\n\"}\\n\"\n\"\")\n self.emailBtn.setObjectName(\"emailBtn\")\n self.emailImg = QtWidgets.QLabel(self.groupBox_2)\n self.emailImg.setGeometry(QtCore.QRect(565, 150, 21, 21))\n self.emailImg.setText(\"\")\n self.emailImg.setPixmap(QtGui.QPixmap(\"./images/wrongcross.png\"))\n self.emailImg.setScaledContents(True)\n self.emailImg.setObjectName(\"emailImg\")\n self.passwordImg = QtWidgets.QLabel(self.groupBox_2)\n self.passwordImg.setGeometry(QtCore.QRect(565, 230, 21, 21))\n self.passwordImg.setText(\"\")\n self.passwordImg.setPixmap(QtGui.QPixmap(\"./images/wrongcross.png\"))\n self.passwordImg.setScaledContents(True)\n self.passwordImg.setObjectName(\"passwordImg\")\n self.UsernameImg = QtWidgets.QLabel(self.groupBox_2)\n self.UsernameImg.setGeometry(QtCore.QRect(565, 190, 21, 21))\n self.UsernameImg.setText(\"\")\n self.UsernameImg.setPixmap(QtGui.QPixmap(\"./images/wrongcross.png\"))\n self.UsernameImg.setScaledContents(True)\n self.UsernameImg.setObjectName(\"UsernameImg\")\n self.passwordBtn_2 = QtWidgets.QPushButton(self.groupBox_2)\n self.passwordBtn_2.setGeometry(QtCore.QRect(590, 264, 271, 31))\n font = QtGui.QFont()\n font.setFamily(\"Segoe UI Semibold\")\n font.setPointSize(9)\n font.setBold(True)\n font.setWeight(75)\n self.passwordBtn_2.setFont(font)\n self.passwordBtn_2.setStyleSheet(\"QPushButton#passwordBtn_2\\n\"\n\"{\\n\"\n\"background-color: Transparent;background-repeat:no-repeat;\\n\"\n\"\\n\"\n\"Text-align:left;\\n\"\n\"}\\n\"\n\"\")\n self.passwordBtn_2.setObjectName(\"passwordBtn_2\")\n self.passwordImg_2 = QtWidgets.QLabel(self.groupBox_2)\n self.passwordImg_2.setGeometry(QtCore.QRect(565, 270, 21, 21))\n self.passwordImg_2.setText(\"\")\n self.passwordImg_2.setPixmap(QtGui.QPixmap(\"./images/wrongcross.png\"))\n self.passwordImg_2.setScaledContents(True)\n self.passwordImg_2.setObjectName(\"passwordImg_2\")\n self.passwordImg_3 = QtWidgets.QLabel(self.groupBox_2)\n self.passwordImg_3.setGeometry(QtCore.QRect(565, 306, 21, 21))\n self.passwordImg_3.setText(\"\")\n self.passwordImg_3.setPixmap(QtGui.QPixmap(\"./images/wrongcross.png\"))\n self.passwordImg_3.setScaledContents(True)\n self.passwordImg_3.setObjectName(\"passwordImg_3\")\n self.passwordBtn_3 = QtWidgets.QPushButton(self.groupBox_2)\n self.passwordBtn_3.setGeometry(QtCore.QRect(590, 300, 271, 31))\n font = QtGui.QFont()\n font.setFamily(\"Segoe UI Semibold\")\n font.setPointSize(9)\n font.setBold(True)\n font.setWeight(75)\n self.passwordBtn_3.setFont(font)\n self.passwordBtn_3.setStyleSheet(\"QPushButton#passwordBtn_3\\n\"\n\"{\\n\"\n\"background-color: Transparent;background-repeat:no-repeat;\\n\"\n\"\\n\"\n\"Text-align:left;\\n\"\n\"}\\n\"\n\"\")\n self.passwordBtn_3.setObjectName(\"passwordBtn_3\")\n self.passwordImg_4 = QtWidgets.QLabel(self.groupBox_2)\n self.passwordImg_4.setGeometry(QtCore.QRect(565, 340, 21, 21))\n self.passwordImg_4.setText(\"\")\n self.passwordImg_4.setPixmap(QtGui.QPixmap(\"./images/wrongcross.png\"))\n self.passwordImg_4.setScaledContents(True)\n self.passwordImg_4.setObjectName(\"passwordImg_4\")\n self.passwordBtn_4 = QtWidgets.QPushButton(self.groupBox_2)\n self.passwordBtn_4.setGeometry(QtCore.QRect(590, 334, 271, 31))\n font = QtGui.QFont()\n font.setFamily(\"Segoe WP 
Semibold\")\n font.setPointSize(9)\n font.setBold(True)\n font.setWeight(75)\n self.passwordBtn_4.setFont(font)\n self.passwordBtn_4.setStyleSheet(\"QPushButton#passwordBtn_4\\n\"\n\"{\\n\"\n\"background-color: Transparent;background-repeat:no-repeat;\\n\"\n\"\\n\"\n\"Text-align:left;\\n\"\n\"}\\n\"\n\"\")\n self.passwordBtn_4.setObjectName(\"passwordBtn_4\")\n self.label_5 = QtWidgets.QLabel(self.groupBox_2)\n self.label_5.setGeometry(QtCore.QRect(570, 110, 61, 31))\n font = QtGui.QFont()\n font.setFamily(\"Segoe UI Semibold\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n self.label_5.setFont(font)\n self.label_5.setObjectName(\"label_5\")\n self.createAccountbtn.raise_()\n self.label_19.raise_()\n self.usernamelabel.raise_()\n self.passwordlabel.raise_()\n self.label_23.raise_()\n self.label_24.raise_()\n self.line_7.raise_()\n self.passwordlabel2.raise_()\n self.firstnamelabel.raise_()\n self.surnamelabel.raise_()\n self.emaillabel.raise_()\n self.label_15.raise_()\n self.label_8.raise_()\n self.label_25.raise_()\n self.label_26.raise_()\n self.label_27.raise_()\n self.passwordBtn.raise_()\n self.usernameBtn.raise_()\n self.emailBtn.raise_()\n self.emailImg.raise_()\n self.passwordImg.raise_()\n self.UsernameImg.raise_()\n self.passwordBtn_2.raise_()\n self.passwordImg_2.raise_()\n self.passwordImg_3.raise_()\n self.passwordBtn_3.raise_()\n self.passwordImg_4.raise_()\n self.passwordBtn_4.raise_()\n self.label_5.raise_()\n self.stackedWidget.addWidget(self.page_3)\n MainWindow.setCentralWidget(self.centralwidget)\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\n self.statusbar.setObjectName(\"statusbar\")\n MainWindow.setStatusBar(self.statusbar)\n self.actionQuit = QtWidgets.QAction(MainWindow)\n self.actionQuit.setObjectName(\"actionQuit\")\n\n self.retranslateUi(MainWindow)\n self.stackedWidget.setCurrentIndex(1)\n self.stackedWidget_4.setCurrentIndex(0)\n self.stackedWidget_2.setCurrentIndex(3)\n self.stackedWidget_3.setCurrentIndex(1)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"Smoke detector\"))\n self.usernotifications.setText(_translate(\"MainWindow\", \" Notifications\"))\n self.webcamDetect.setText(_translate(\"MainWindow\", \" Detect webcam\"))\n self.detectimage.setText(_translate(\"MainWindow\", \" Detect Image\"))\n self.detectvideo.setText(_translate(\"MainWindow\", \" Detect video\"))\n self.userSettings.setText(_translate(\"MainWindow\", \" Settings\"))\n self.logout.setText(_translate(\"MainWindow\", \" Logout\"))\n self.smokeTitle.setText(_translate(\"MainWindow\", \" Smoke detector\"))\n self.label_34.setText(_translate(\"MainWindow\", \"Notification filter\"))\n self.label_35.setText(_translate(\"MainWindow\", \"Detection filter: \"))\n self.checkBox.setText(_translate(\"MainWindow\", \"Image\"))\n self.checkBox_2.setText(_translate(\"MainWindow\", \"Video\"))\n self.checkBox_3.setText(_translate(\"MainWindow\", \"Webcam\"))\n self.label_36.setText(_translate(\"MainWindow\", \"Date filter: \"))\n self.label_37.setText(_translate(\"MainWindow\", \"to\"))\n self.filterbtn.setText(_translate(\"MainWindow\", \"Filter\"))\n self.label_38.setText(_translate(\"MainWindow\", \"For one date filter leave any as default\"))\n self.homebutton.setText(_translate(\"MainWindow\", \" Home\"))\n self.changeUserInfobtn.setText(_translate(\"MainWindow\", \"Change user info\"))\n 
self.Changepasswordbtn.setText(_translate(\"MainWindow\", \"Change password\"))\n        self.label_28.setText(_translate(\"MainWindow\", \"Email:\"))\n        self.label_16.setText(_translate(\"MainWindow\", \"First name:\"))\n        self.label_22.setText(_translate(\"MainWindow\", \"Surname:\"))\n        self.saveuserinfobtn.setText(_translate(\"MainWindow\", \"save user info\"))\n        self.label_29.setText(_translate(\"MainWindow\", \"Current password\"))\n        self.label_30.setText(_translate(\"MainWindow\", \"New password\"))\n        self.label_31.setText(_translate(\"MainWindow\", \"re-enter new password\"))\n        self.savenewpasswordbtn.setText(_translate(\"MainWindow\", \"save new password\"))\n        self.label_7.setText(_translate(\"MainWindow\", \"Notifications\"))\n        self.backToNotification.setText(_translate(\"MainWindow\", \" Back\"))\n        self.label_39.setText(_translate(\"MainWindow\", \"Home\"))\n        self.label_41.setText(_translate(\"MainWindow\", \"Information about the application:\"))\n        self.label_42.setText(_translate(\"MainWindow\", \"• The application provides the user with automatic detection of smokers within a given location. \"))\n        self.label_43.setText(_translate(\"MainWindow\", \"• Due to the processing power required, some features might not be available or function as intended on commodity PCs.\"))\n        self.label_44.setText(_translate(\"MainWindow\", \"• This application is intended for industry use or the prevention of smoking in strictly prohibited areas, such as schools. \"))\n        self.label_45.setText(_translate(\"MainWindow\", \"• Due to the high processing power required, the application needs an NVIDIA graphics card in order to function flawlessly. \"))\n        self.label_47.setText(_translate(\"MainWindow\", \"Features provided:\"))\n        self.label_48.setText(_translate(\"MainWindow\", \"• Automatic detection of smokers within a live webcam feed provided by the user. \"))\n        self.label_49.setText(_translate(\"MainWindow\", \"• Automatic detection of smokers within an uploaded video from the user.\"))\n        self.label_50.setText(_translate(\"MainWindow\", \"• Automatic detection of smokers within an uploaded image from the user.\"))\n        self.label_51.setText(_translate(\"MainWindow\", \"• Every 30 seconds a screenshot is taken of an uploaded video/webcam if it detects a smoker.\"))\n        self.label_52.setText(_translate(\"MainWindow\", \"• Screenshots of detected smokers are saved in notifications and show the date, time and type of detection. \"))\n        self.label_53.setText(_translate(\"MainWindow\", \"• Screenshots are also saved in the user's local folders. 
\"))\n self.label_54.setText(_translate(\"MainWindow\", \"• Screenshots can be enlarged, saved and deleted in the notificiation page.\"))\n self.openFilebtn.setText(_translate(\"MainWindow\", \"Open file\"))\n self.imagedetectbtn.setText(_translate(\"MainWindow\", \"Detect\"))\n self.label_11.setText(_translate(\"MainWindow\", \"Upload image\"))\n self.label_12.setText(_translate(\"MainWindow\", \"Upload any image .jpg, .png etc and set a path to where you wish the image to be loaded\"))\n self.setPathbtn.setText(_translate(\"MainWindow\", \"Set path\"))\n self.fileLabel.setText(_translate(\"MainWindow\", \"Open file path\"))\n self.pathLabel.setText(_translate(\"MainWindow\", \"set file path\"))\n self.saveNotificationsImage.setText(_translate(\"MainWindow\", \"Save in notifications\"))\n self.saveNotificationsImage_3.setText(_translate(\"MainWindow\", \"Save detections in notifications\"))\n self.detectCamera_2.setText(_translate(\"MainWindow\", \"Detect video\"))\n self.label_20.setText(_translate(\"MainWindow\", \"Add the video path in order to start detected smokers within a video, detections will be saved in videoimages folder\"))\n self.fileLabel_2.setText(_translate(\"MainWindow\", \"Open file path\"))\n self.openFilebtn_2.setText(_translate(\"MainWindow\", \"Open file\"))\n self.label_10.setText(_translate(\"MainWindow\", \"Type in the camera IP to open up a new window that will detect at that location. If you wish to detect through webcam the IP is 0.\"))\n self.label_13.setText(_translate(\"MainWindow\", \"Camera IP:\"))\n self.label_14.setText(_translate(\"MainWindow\", \"Location:\"))\n self.detectCamera.setText(_translate(\"MainWindow\", \"Detect camera\"))\n self.saveNotificationsImage_2.setText(_translate(\"MainWindow\", \"Save detections in notifications\"))\n self.LoginButton.setText(_translate(\"MainWindow\", \"LOGIN\"))\n self.label_4.setText(_translate(\"MainWindow\", \"Member Login\"))\n self.lineEdit.setPlaceholderText(_translate(\"MainWindow\", \"Username\"))\n self.lineEdit_2.setPlaceholderText(_translate(\"MainWindow\", \"Password\"))\n self.label.setText(_translate(\"MainWindow\", \"Detect smokers\"))\n self.signbtn.setText(_translate(\"MainWindow\", \"Not a member? 
Sign up now\"))\n self.createAccountbtn.setText(_translate(\"MainWindow\", \"Create account\"))\n self.label_19.setText(_translate(\"MainWindow\", \"User info\"))\n self.usernamelabel.setPlaceholderText(_translate(\"MainWindow\", \"Username\"))\n self.passwordlabel.setPlaceholderText(_translate(\"MainWindow\", \"Password\"))\n self.label_24.setText(_translate(\"MainWindow\", \"Detect smokers\"))\n self.passwordlabel2.setPlaceholderText(_translate(\"MainWindow\", \"Re-enter password\"))\n self.firstnamelabel.setPlaceholderText(_translate(\"MainWindow\", \"First name\"))\n self.surnamelabel.setPlaceholderText(_translate(\"MainWindow\", \"Surname\"))\n self.emaillabel.setPlaceholderText(_translate(\"MainWindow\", \"Email\"))\n self.label_26.setText(_translate(\"MainWindow\", \"User login\"))\n self.label_27.setText(_translate(\"MainWindow\", \"Member signup\"))\n self.passwordBtn.setText(_translate(\"MainWindow\", \"Password must be longer than 4 characters\"))\n self.usernameBtn.setText(_translate(\"MainWindow\", \"Username must be unique\"))\n self.emailBtn.setText(_translate(\"MainWindow\", \"Email must be valid\"))\n self.passwordBtn_2.setText(_translate(\"MainWindow\", \"Password must have a capital letter\"))\n self.passwordBtn_3.setText(_translate(\"MainWindow\", \"Password must have a number\"))\n self.passwordBtn_4.setText(_translate(\"MainWindow\", \"Passwords must be the same\"))\n self.label_5.setText(_translate(\"MainWindow\", \"Steps:\"))\n self.actionQuit.setText(_translate(\"MainWindow\", \"Quit\"))\n" }, { "alpha_fraction": 0.621169924736023, "alphanum_fraction": 0.621169924736023, "avg_line_length": 34.900001525878906, "blob_id": "12ec5503c52da417b995af2cc8825d5db0ec14e7", "content_id": "a1081205b30cead20f259d0f3539381bb164171b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 359, "license_type": "no_license", "max_line_length": 94, "num_lines": 10, "path": "/designs/userprofile.py", "repo_name": "viathus/Image-object-detection-application", "src_encoding": "UTF-8", "text": "class User:\n def __init__(self,firstName,lastName, email, username, gender, age, password, imagePaths):\n self.firstName = firstName\n self.lastName = lastName\n self.email = email\n self.username = username\n self.gender = gender\n self.age = age\n self.password = password\n self.imagePaths = imagePaths\n" }, { "alpha_fraction": 0.676915168762207, "alphanum_fraction": 0.6907507181167603, "avg_line_length": 40.521278381347656, "blob_id": "5e97956dff11fce1955f640b6e95ea2ed2499428", "content_id": "398aefdedc26770f8705ea934ef92ad58e9c91c3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3903, "license_type": "no_license", "max_line_length": 110, "num_lines": 94, "path": "/designs/testWidget.py", "repo_name": "viathus/Image-object-detection-application", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# self implementation generated from reading ui file 'customWidget.ui'\n#\n# Created by: PyQt5 UI code generator 5.12.1\n#\n# WARNING! 
All changes made in this file will be lost!\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\n\n\nclass test_widget(QtWidgets.QWidget):\n def __init__ (self, parent = None):\n super(test_widget, self).__init__(parent)\n self.setFixedSize(274, 288)\n self.setStyleSheet(\"QWidget{\\n\"\n\"padding-left: 5px;\\n\"\n\"}\")\n self.verticalLayout = QtWidgets.QVBoxLayout(self)\n self.verticalLayout.setObjectName(\"verticalLayout\")\n self.customWidgetImage = QtWidgets.QLabel(self)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Expanding)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.customWidgetImage.sizePolicy().hasHeightForWidth())\n self.customWidgetImage.setSizePolicy(sizePolicy)\n self.customWidgetImage.setMinimumSize(QtCore.QSize(20, 30))\n self.customWidgetImage.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)\n self.customWidgetImage.setObjectName(\"customWidgetImage\")\n self.verticalLayout.addWidget(self.customWidgetImage)\n self.customWidgetType = QtWidgets.QLabel(self)\n self.customWidgetType.setMinimumSize(QtCore.QSize(0, 20))\n font = QtGui.QFont()\n font.setFamily(\"Segoe WP Semibold\")\n font.setPointSize(9)\n font.setBold(True)\n font.setWeight(75)\n self.customWidgetType.setFont(font)\n self.customWidgetType.setStyleSheet(\"QLabel#customWidgetType{\\n\"\n \"color: white;\\n\"\n \"}\")\n self.customWidgetType.setObjectName(\"customWidgetType\")\n self.verticalLayout.addWidget(self.customWidgetType)\n self.customWidgetDate = QtWidgets.QLabel(self)\n self.customWidgetDate.setMinimumSize(QtCore.QSize(0, 20))\n font = QtGui.QFont()\n font.setFamily(\"Segoe WP Semibold\")\n font.setPointSize(9)\n font.setBold(True)\n font.setWeight(75)\n self.customWidgetDate.setFont(font)\n self.customWidgetDate.setStyleSheet(\"QLabel#customWidgetDate{\\n\"\n \"color: white;\\n\"\n \"}\")\n self.customWidgetDate.setObjectName(\"customWidgetDate\")\n self.verticalLayout.addWidget(self.customWidgetDate)\n self.customWidgetScore = QtWidgets.QLabel(self)\n self.customWidgetScore.setMinimumSize(QtCore.QSize(0, 20))\n font = QtGui.QFont()\n font.setFamily(\"Segoe WP Semibold\")\n font.setPointSize(9)\n font.setBold(True)\n font.setWeight(75)\n self.customWidgetScore.setFont(font)\n self.customWidgetScore.setStyleSheet(\"QLabel#customWidgetScore{\\n\"\n \"color: white;\\n\"\n \"}\")\n self.customWidgetScore.setObjectName(\"customWidgetScore\")\n self.verticalLayout.addWidget(self.customWidgetScore)\n QtCore.QMetaObject.connectSlotsByName(self)\n self.customWidgetImage.setText(\"Image\")\n self.customWidgetType.setText(\"Type\")\n self.customWidgetDate.setText(\"Date\")\n self.customWidgetScore.setText(\"Time\")\n\n\n\n def setLabelDate(self,text):\n self.customWidgetDate.setText(text)\n def setLabelTime(self,text):\n self.customWidgetScore.setText(text)\n def setLabelType(self,text):\n self.customWidgetType.setText(text)\n def setImage(self, text):\n pixmap1 = QtGui.QPixmap()\n pixmap1.loadFromData(text)\n pixmap = QtGui.QPixmap(pixmap1)\n scaled_pixmap = pixmap.scaled(200,200)\n self.customWidgetImage.setPixmap(QtGui.QPixmap(scaled_pixmap))\n\n def setImage1(self, text):\n pixmap = QtGui.QPixmap(text)\n scaled_pixmap = pixmap.scaled(200,200)\n self.customWidgetImage.setPixmap(QtGui.QPixmap(scaled_pixmap))\n" }, { "alpha_fraction": 0.8391608595848083, "alphanum_fraction": 0.8461538553237915, "avg_line_length": 14.88888931274414, "blob_id": 
"bf4ead8a9a76ddd079eb51a211c0a0cee5a58049", "content_id": "df1c9155ef624b89ff851b3dd6cffcf3ecabe0fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 143, "license_type": "no_license", "max_line_length": 43, "num_lines": 9, "path": "/designs/requirements-gpu.txt", "repo_name": "viathus/Image-object-detection-application", "src_encoding": "UTF-8", "text": "requests\npython-firebase\nPyQt5\n--ignore-installed --upgrade tensorflow-gpu\nvalidate_email\nPillow\nopencv-python\ngoogle-cloud-storage\nmatplotlib\n" }, { "alpha_fraction": 0.7782571315765381, "alphanum_fraction": 0.7782571315765381, "avg_line_length": 36.3870964050293, "blob_id": "d630d616296d5b416e5b6f5d0985e11962fde83d", "content_id": "25a73c29bd40c0624ac1691269d0857d08c5be86", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1159, "license_type": "no_license", "max_line_length": 183, "num_lines": 31, "path": "/README.md", "repo_name": "viathus/Image-object-detection-application", "src_encoding": "UTF-8", "text": "# A University Project!\n\n# Image-object-detection-application\nImage object detection and recognition \n\nHere are some screenshots if you don't plan on running the application, if so open the design folder there should be a different readme with instruction on how to run the application.\n\n\n### **Signup**\n\n![Image not found](https://github.com/viathus/Image-object-detection-application/blob/master/images/signup.png)\n\n### **Login**\n\n![Image not found](https://github.com/viathus/Image-object-detection-application/blob/master/images/login.png)\n\n### **Notifications**\n\n![Image not found](https://github.com/viathus/Image-object-detection-application/blob/master/images/notifications.png)\n\n### **Detect through images**\n\n![Image not found](https://github.com/viathus/Image-object-detection-application/blob/master/images/detectimage.png)\n\n### **Notable features**\n* Detect smoker within image\n* Detect smoker within video\n* Detect smoker within webcam\n* Has timer which automatically saves detected smokers within webcam, therefore no need for someone to watch over\n* can change user details\n* can enlarge, delete and save detected smokers within notifications\n" }, { "alpha_fraction": 0.8636363744735718, "alphanum_fraction": 0.8727272748947144, "avg_line_length": 11.222222328186035, "blob_id": "a2e190c7be00c36e774e1e6fe157b1889195e2b4", "content_id": "dee3a5c5e4f39915e2e5356102fb79dcf43d0168", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 110, "license_type": "no_license", "max_line_length": 20, "num_lines": 9, "path": "/designs/requirements.txt", "repo_name": "viathus/Image-object-detection-application", "src_encoding": "UTF-8", "text": "requests\npython-firebase\nPyQt5\ntensorflow\nvalidate_email\nPillow\nopencv-python\ngoogle-cloud-storage\nmatplotlib\n" }, { "alpha_fraction": 0.5852338671684265, "alphanum_fraction": 0.5954873561859131, "avg_line_length": 43.0560188293457, "blob_id": "53d5ab30226c980909c9f1167e72c72d21393b04", "content_id": "fe026c2dec60a907ae41a5849b2c91019dc0c5bf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 36963, "license_type": "no_license", "max_line_length": 274, "num_lines": 839, "path": "/designs/implem.py", "repo_name": "viathus/Image-object-detection-application", "src_encoding": "UTF-8", "text": "from firebase import firebase\nfrom PyQt5 import 
 QtCore, QtGui, QtWidgets\nimport sys\nfrom PyQt5.QtWidgets import QFileDialog\nfrom PIL.ImageQt import ImageQt\nimport cv2\nimport tensorflow as tf\nimport numpy as np\nimport os\nfrom PIL import Image\nfrom userprofile import User\nfrom testWidget import test_widget\nfrom design import Ui_MainWindow\nimport threading\nimport time\nfrom validate_email import validate_email\nimport re\nfrom google.cloud import storage\nimport urllib.request\nfrom PyQt5.QtWidgets import qApp\nfrom utils import testdetection as vis_util\nfrom time import gmtime, strftime\nimport datetime\n\n\n#Creates connections to Firebase\nos.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"] = \"./pythonapplication-15b3ad1dd76b.json\"\nfirebase = firebase.FirebaseApplication('https://pythonapplication-aa823.firebaseio.com/',None)\nclient = storage.Client()\nbucket = client.get_bucket('pythonapplication-aa823.appspot.com')\n\n\n\nitem = None #user item\nuserID = None #unique user id to identify in firebase\ncurrent_user = None #the current user\nanswer = None #to check if we retrieve anything from firebase\nfname = None #file name for image\nf1 = None #file output location\nflag = True #flag to stop thread\nfvideoname = None #video name\n\n#Options to limit GPU usage\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\n#\n#LOADING IN MODEL\n#\nPATH_TO_MODEL = './Model/smoking_model.pb'\n#Create TensorFlow graph which will allow us to represent units of computation and units of data that flow between operations\ndetection_graph = tf.Graph() #creates default graph\nwith detection_graph.as_default(): #allows us to define operations and tensors in detection graph\n    od_graph_def = tf.GraphDef() #serialized version of graph, allows us to print, store or restore a graph\n    with tf.gfile.GFile(PATH_TO_MODEL, 'rb') as fid: #Opens the model file at the given path\n        od_graph_def.ParseFromString(fid.read()) #Reads the saved model and parses it into the serialized version of the graph.\n        tf.import_graph_def(od_graph_def, name='') #imports the serialized graph into the detection graph\n\n    sess = tf.Session(graph=detection_graph,config=config) #creates a session from the graph and saves it in sess, so it can be called whenever needed\n\nimage_tensor = detection_graph.get_tensor_by_name('image_tensor:0') #Get input image field\ndetection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0') #Get detection boxes field\ndetection_scores = detection_graph.get_tensor_by_name('detection_scores:0') #Get detection scores field\ndetection_classes = detection_graph.get_tensor_by_name('detection_classes:0') #Get detection classes field\nnum_detections = detection_graph.get_tensor_by_name('num_detections:0') #Get number of detections field\n\n\n#Creates UI and sets up connections for buttons\nclass MainWindow(QtWidgets.QMainWindow):\n    def __init__(self):\n        super(MainWindow, self).__init__()\n        self.ui = Ui_MainWindow()\n        self.ui.setupUi(self)\n        self.ui.LoginButton.clicked.connect(self.login)\n        self.ui.imagedetectbtn.clicked.connect(self.DetectImage)\n        self.ui.openFilebtn.clicked.connect(self.openFile)\n        self.ui.setPathbtn.clicked.connect(self.outputFile)\n\n        self.ui.detectimage.clicked.connect(self.switchImageWidget)\n        self.ui.detectvideo.clicked.connect(self.switchVideoWidget)\n        self.ui.webcamDetect.clicked.connect(self.switchWebcamWidget)\n        self.ui.usernotifications.clicked.connect(self.switchNotificationsWidget)\n        self.ui.userSettings.clicked.connect(self.switchSettingsWidget)\n        self.ui.logout.clicked.connect(self.switchLoginWidget)\n
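        # signup, account-settings and detection-page buttons are wired to their handlers below\n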
        self.ui.signbtn.clicked.connect(self.switchSignupWidget)\n        self.ui.createAccountbtn.clicked.connect(self.signup)\n        self.ui.changeUserInfobtn.clicked.connect(self.switchUserInfoWidget)\n        self.ui.Changepasswordbtn.clicked.connect(self.switchPasswordWidget)\n        self.ui.saveuserinfobtn.clicked.connect(self.saveUserInfo)\n        self.ui.savenewpasswordbtn.clicked.connect(self.saveNewPassword)\n        self.ui.detectCamera.clicked.connect(self.detectCameraIP)\n        self.ui.backToNotification.clicked.connect(self.switchNotificationsWidget)\n        self.ui.filterbtn.clicked.connect(self.filterValues)\n        self.ui.detectCamera_2.clicked.connect(self.detectVideo)\n        self.ui.openFilebtn_2.clicked.connect(self.openVideoFile)\n        self.ui.homebutton.clicked.connect(self.switchHomeWidget)\n        #Create event for notification so user can delete or save\n        self.ui.listWidget_2.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)\n        self.ui.listWidget_2.customContextMenuRequested.connect(self.rightClickFunction)\n        self.customMenu = QtWidgets.QMenu('Menu', self.ui.listWidget_2)\n        self.customMenu.addAction(QtGui.QIcon('images/resize.png'), \"Enlarge\", self.enlargeImage)\n        self.customMenu.addAction(QtGui.QIcon('images/save.png'), \"Save\", self.saveFunction)\n        self.customMenu.addAction(QtGui.QIcon('images/delete.png'), \"Delete\", self.deleteFunction)\n\n\n    #Filter\n    def filterValues(self):\n        #Array containing checkbox values\n        Detection_type = []\n        if self.ui.checkBox.isChecked():\n            Detection_type.append(\"image\")\n        if self.ui.checkBox_2.isChecked():\n            Detection_type.append(\"video\")\n        if self.ui.checkBox_3.isChecked():\n            Detection_type.append(\"webcam\")\n\n        #Get date from user input and rearrange format\n        date1 = datetime.datetime.strptime(str(self.ui.dateEdit_2.date().toPyDate()), '%Y-%m-%d').strftime('%d-%m-%Y')\n        date2 = datetime.datetime.strptime(str(self.ui.dateEdit_3.date().toPyDate()), '%Y-%m-%d').strftime('%d-%m-%Y')\n\n        #Go through each widget and filter it depending on what the user has picked\n        for row in range(self.ui.listWidget_2.count()):\n            index = self.ui.listWidget_2.item(row)\n            widget = self.ui.listWidget_2.itemWidget(index)\n            if self.filter(Detection_type, date1,date2,5,5,widget):\n                index.setHidden(False)\n            else:\n                index.setHidden(True)\n\n\n\n\n    #Filter: the if statements below check the selected conditions\n    def filter(self, detection_types, date1, date2, time1, time2, widget):\n        date_check = widget.customWidgetDate.text().replace(\"Date: \", \"\")\n        type_check = widget.customWidgetType.text().replace(\"Detection: \", \"\")\n\n        myDate = datetime.datetime.strptime(date_check,'%d-%m-%Y')\n        userDate1 = datetime.datetime.strptime(date1,'%d-%m-%Y')\n        userDate2 = datetime.datetime.strptime(date2,'%d-%m-%Y')\n        default_date = datetime.datetime.strptime(\"01-01-2000\",'%d-%m-%Y')\n\n        #Order the dates so it doesn't matter which way round the user enters them\n        if userDate1 != default_date and userDate2 != default_date:\n            if userDate1 > userDate2:\n                temp = userDate1\n                userDate1 = userDate2\n                userDate2 = temp\n\n\n        #Filter\n        #\n        #Simple filter with IF statements to check whether the widget matches what we are looking for; if so, filter accordingly\n        #\n        if len(detection_types) == 0:\n            if default_date == userDate1 and default_date != userDate2:\n                if userDate2 == myDate:\n                    return True\n                else:\n                    return False\n            elif default_date != userDate1 and default_date == userDate2:\n                if userDate1 == myDate:\n                    return True\n                else:\n                    return False\n            elif userDate1 != default_date and userDate2 != default_date:\n                if userDate1 <= myDate and userDate2 >= myDate:\n                    return True\n                else:\n                    return False\n
        else:\n            if default_date == userDate1 and default_date != userDate2:\n                if userDate2 == myDate and type_check in detection_types:\n                    return True\n                else:\n                    return False\n            elif default_date != userDate1 and default_date == userDate2:\n                if userDate1 == myDate and type_check in detection_types:\n                    return True\n                else:\n                    return False\n            elif userDate1 != default_date and userDate2 != default_date:\n                if userDate1 <= myDate and userDate2 >= myDate and type_check in detection_types:\n                    return True\n                else:\n                    return False\n            elif userDate1 == default_date and userDate2 == default_date:\n                if type_check in detection_types:\n                    return True\n                else:\n                    return False\n        return True\n\n\n\n\n\n\n    #Enlarges the image for a detection in the notifications\n    def enlargeImage(self):\n        self.ui.stackedWidget_4.setCurrentWidget(self.ui.page_4)\n        self.ui.stackedWidget_2.setCurrentWidget(self.ui.largeImage)\n\n        #Takes the image and breaks the file name apart so it can be stored correctly\n        widget = self.ui.listWidget_2.itemWidget(self.ui.listWidget_2.currentItem())\n        date = widget.customWidgetDate.text().replace(\"Date: \", \"\")\n        time = widget.customWidgetScore.text().replace(\"Time: \", \"\")\n        time = time.replace(\":\",\"-\")\n        type = widget.customWidgetType.text().replace(\"Detection: \", \"\")\n        value = str(date+ \" \" +time + \" \" + type + \".jpg\")\n\n\n        #Gets the user's image, downloads it and displays it in the notifications view\n        imageBlob = bucket.blob(current_user + \"/\" + value)\n        with urllib.request.urlopen(imageBlob.public_url) as url:\n            s = url.read()\n            pixmap1 = QtGui.QPixmap()\n            pixmap1.loadFromData(s)\n            pixmap = QtGui.QPixmap(pixmap1)\n            self.ui.enlargeImagelabel.setPixmap(QtGui.QPixmap(pixmap))\n            self.ui.enlargeImagelabel.setScaledContents(True)\n            self.ui.enlargeImagelabel.setSizePolicy(QtWidgets.QSizePolicy.Ignored, QtWidgets.QSizePolicy.Ignored)\n            self.ui.label_6.setText(widget.customWidgetDate.text())\n            self.ui.label_32.setText(widget.customWidgetScore.text())\n            self.ui.label_33.setText(widget.customWidgetType.text())\n\n\n    #Allows you to save the images in notifications\n    def saveFunction(self):\n        self.ui.stackedWidget_4.setCurrentWidget(self.ui.page_4)\n        file = str(QFileDialog.getExistingDirectory(self, \"Select Directory\"))\n        if file != \"\":\n            widget = self.ui.listWidget_2.itemWidget(self.ui.listWidget_2.currentItem())\n            date = widget.customWidgetDate.text().replace(\"Date: \", \"\")\n            time = widget.customWidgetScore.text().replace(\"Time: \", \"\")\n            time = time.replace(\":\",\"-\")\n            type = widget.customWidgetType.text().replace(\"Detection: \", \"\")\n            value = str(date+ \" \" +time + \" \" + type + \".jpg\")\n            print(file+\"/\"+value)\n            imageBlob = bucket.blob(current_user + \"/\" + value) #Get the user's folder from the database, and their images\n            urllib.request.urlretrieve(imageBlob.public_url, file+\"/\"+value)\n            self.ui.stackedWidget_4.setCurrentWidget(self.ui.page_4)\n\n    #Allows you to delete notifications from your database\n    def deleteFunction(self):\n        self.ui.stackedWidget_4.setCurrentWidget(self.ui.page_4)\n        global item\n        global firebase\n        widget = self.ui.listWidget_2.itemWidget(self.ui.listWidget_2.currentItem())\n        date = widget.customWidgetDate.text().replace(\"Date: \", \"\")\n        time = widget.customWidgetScore.text().replace(\"Time: \", \"\")\n        time = time.replace(\":\",\"-\")\n        type = widget.customWidgetType.text().replace(\"Detection: \", \"\")\n        value = str(date+ \" \" +time + \" \" + type + \".jpg\")\n        self.ui.listWidget_2.removeItemWidget(self.ui.listWidget_2.currentItem())\n
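        # takeItem() removes the row itself from the QListWidget, and deleteLater() then schedules the embedded widget for safe deletion\n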
        self.ui.listWidget_2.takeItem(self.ui.listWidget_2.row(self.ui.listWidget_2.currentItem()))\n        widget.deleteLater()\n        #Find the database entry whose value matches this notification and delete only that entry\n        for k, v in answer[\"imagePaths\"].items():\n            if v == value:\n                firebase.delete(\"/users/\" + userID + \"/imagePaths/\" + k, None)\n                break\n\n\n        print(\"delete\")\n\n    #Creates right click options with enlarge, save, delete\n    def rightClickFunction(self, event):\n        global item\n        index = self.ui.listWidget_2.indexAt(event)\n        if not index.isValid():\n            return\n        item = self.ui.listWidget_2.indexAt(event)\n        self.customMenu.popup(QtGui.QCursor.pos())\n\n\n\n    #Saves new password\n    def saveNewPassword(self):\n        if self.ui.lineEdit_6.text() == answer[\"password\"]:\n            #Run the three password checks and keep the first failure message, if any\n            value = \"perfect\"\n            for check in (self.passwordLengthValidation, self.passwordCapitalValidation, self.passwordNumberValidation):\n                result = check(self.ui.lineEdit_7.text())\n                if result != \"perfect\":\n                    value = result\n                    break\n            if \"perfect\" == value:\n                if self.ui.lineEdit_7.text() == self.ui.lineEdit_8.text():\n                    firebase.put('/users/'+ userID ,\"password\" , self.ui.lineEdit_7.text())\n                    self.ui.errorlabel.setText(\"Updated password\")\n                else:\n                    self.ui.errorlabel.setText(\"Passwords not the same\")\n            else:\n                self.ui.errorlabel.setText(value)\n        else:\n            self.ui.errorlabel.setText(\"Current password is incorrect\")\n\n\n    #Saves user info\n    def saveUserInfo(self):\n        firebase.put('/users/'+ userID ,\"firstName\" , self.ui.lineEdit_3.text())\n        firebase.put('/users/'+ userID ,\"lastName\" , self.ui.lineEdit_4.text())\n        firebase.put('/users/'+ userID ,\"email\" , self.ui.lineEdit_5.text())\n\n\n    #These set up the switches between the different pages of the application\n    def switchUserInfoWidget(self):\n        self.ui.stackedWidget_3.setCurrentWidget(self.ui.page)\n        if answer is not None:\n            self.ui.lineEdit_3.setText(answer[\"firstName\"])\n            self.ui.lineEdit_4.setText(answer[\"lastName\"])\n            self.ui.lineEdit_5.setText(answer[\"email\"])\n    def switchHomeWidget(self):\n        self.ui.stackedWidget_2.setCurrentWidget(self.ui.page_6)\n        self.ui.stackedWidget_4.setCurrentWidget(self.ui.page_4)\n    def switchPasswordWidget(self):\n        self.ui.stackedWidget_3.setCurrentWidget(self.ui.page_2)\n        self.ui.stackedWidget_4.setCurrentWidget(self.ui.page_4)\n    def switchImageWidget(self):\n        self.ui.label_21.setText(\"\")\n        self.ui.fileLabel.setText(\"Open file path\")\n        self.ui.pathLabel.setText(\"set file path\")\n        self.ui.stackedWidget_2.setCurrentWidget(self.ui.detect_image_page)\n        self.ui.stackedWidget_4.setCurrentWidget(self.ui.page_4)\n    def switchWebcamWidget(self):\n        self.ui.label_9.setText(\"\")\n        self.ui.cameraIP.setText(\"\")\n        self.ui.locationCamera.setText(\"\")\n        self.ui.stackedWidget_2.setCurrentWidget(self.ui.detect_webcam_page)\n        self.ui.stackedWidget_4.setCurrentWidget(self.ui.page_4)\n    def switchVideoWidget(self):\n        self.ui.errorvideo.setText(\"\")\n        self.ui.fileLabel_2.setText(\"Open file path\")\n        self.ui.stackedWidget_2.setCurrentWidget(self.ui.detect_video_page)\n        self.ui.stackedWidget_4.setCurrentWidget(self.ui.page_4)\n    def switchSettingsWidget(self):\n        self.ui.stackedWidget_2.setCurrentWidget(self.ui.setting_page)\n        self.ui.stackedWidget_4.setCurrentWidget(self.ui.page_4)\n    def switchNotificationsWidget(self):\n        self.ui.stackedWidget_2.setCurrentWidget(self.ui.notification_page)\n        self.ui.stackedWidget_4.setCurrentWidget(self.ui.page_5)\n    def switchLoginWidget(self):\n        self.ui.stackedWidget.setCurrentWidget(self.ui.login_page)\n        self.ui.stackedWidget_2.setCurrentWidget(self.ui.page_6)\n        self.ui.stackedWidget_4.setCurrentWidget(self.ui.page_5)\n        self.ui.lineEdit_2.setText(\"\")\n        self.ui.listWidget_2.clear()\n        self.ui.cameraIP.setText(\"\")\n        self.ui.label_17.setText(\"\")\n
        self.ui.locationCamera.setText(\"\")\n        self.ui.fileLabel_2.setText(\"Open file path\")\n        self.ui.fileLabel.setText(\"Open file path\")\n        self.ui.pathLabel.setText(\"set file path\")\n        #Reset the module-level session state; without the global declaration these assignments would only create locals\n        global item, userID, current_user, answer, fname, f1, flag, fvideoname\n        item = None\n        userID = None\n        current_user = None\n        answer = None\n        fname = None\n        f1 = None\n        flag = True\n        fvideoname = None\n\n\n    #Runs in a thread which constantly checks the user's signup input and sees if they have entered everything correctly\n    def newFunc(self):\n        global flag\n        global firebase\n        result = firebase.get('/users',None)\n        time.sleep(2)\n        while flag:\n            #Check username field\n            answer = None\n            if self.ui.usernamelabel.text():\n                if len(self.ui.usernamelabel.text()) >= 4:\n                    answer = next((item for item in result.values() if item[\"username\"] == self.ui.usernamelabel.text()), None)\n                    if answer is None:\n                        self.ui.UsernameImg.setPixmap(QtGui.QPixmap(\"./images/greentick.png\"))\n                        self.ui.usernameBtn.setText(\"Perfect!\")\n                    else:\n                        self.badUsernameField()\n                        self.ui.usernameBtn.setText(\"Username already exists\")\n                else:\n                    self.badUsernameField()\n                    self.ui.usernameBtn.setText(\"Username must be at least 4 characters\")\n            else:\n                self.badUsernameField()\n                self.ui.usernameBtn.setText(\"Username must be unique\")\n\n            #Check email fields\n            if self.ui.emaillabel.text():\n                if validate_email(self.ui.emaillabel.text()):\n                    self.goodEmailField()\n                    self.ui.emailBtn.setText(\"Perfect!\")\n                else:\n                    self.badEmailField()\n                    self.ui.emailBtn.setText(\"Email must be valid\")\n            else:\n                self.badEmailField()\n                self.ui.emailBtn.setText(\"Email must be valid\")\n\n            #Check password fields\n            if self.ui.passwordlabel.text():\n                vald1 = self.passwordLengthValidation(self.ui.passwordlabel.text())\n                vald3 = self.passwordNumberValidation(self.ui.passwordlabel.text())\n                vald2 = self.passwordCapitalValidation(self.ui.passwordlabel.text())\n\n                if vald1 == \"perfect\":\n                    self.goodPasswordLengthField()\n                    self.ui.passwordBtn.setText(\"Perfect!\")\n                else:\n                    self.badPasswordLengthField()\n                    self.ui.passwordBtn.setText(\"Password must be longer than 4 characters\")\n\n                if vald2 == \"perfect\":\n                    self.goodPasswordCapitalField()\n                    self.ui.passwordBtn_2.setText(\"Perfect!\")\n                else:\n                    self.badPasswordCapitalField()\n                    self.ui.passwordBtn_2.setText(\"Password must have a capital letter\")\n\n                if vald3 == \"perfect\":\n                    self.goodPasswordNumberField()\n                    self.ui.passwordBtn_3.setText(\"Perfect!\")\n                else:\n                    self.badPasswordNumberField()\n                    self.ui.passwordBtn_3.setText(\"Password must have a number\")\n\n                if self.ui.passwordlabel.text() == self.ui.passwordlabel2.text():\n                    self.goodPasswordSameField()\n                    self.ui.passwordBtn_4.setText(\"Perfect!\")\n                else:\n                    self.badPasswordSameField()\n                    self.ui.passwordBtn_4.setText(\"Passwords must be the same\")\n            else:\n                self.badPasswordNumberField()\n                self.badPasswordCapitalField()\n                self.badPasswordLengthField()\n                self.badPasswordSameField()\n                self.ui.passwordBtn_3.setText(\"Password must have a number\")\n                self.ui.passwordBtn_2.setText(\"Password must have a capital letter\")\n                self.ui.passwordBtn.setText(\"Password must be longer than 4 characters\")\n                self.ui.passwordBtn_4.setText(\"Passwords must be the same\")\n\n\n            # value = self.passwordValidation(self.ui.passwordlabel.text())\n            # if value == \"perfect\":\n            #     if self.ui.passwordlabel2.text():\n            #         if self.ui.passwordlabel.text() == self.ui.passwordlabel2.text():\n            #             self.goodPasswordField()\n            #             self.ui.passwordBtn.setText(\"Password is perfect!\")\n            #         else:\n            #             self.badPasswordField()\n            #             self.ui.passwordBtn.setText(\"Passwords must be the same\")\n            #     else:\n            
# self.badPasswordField()\n # self.ui.passwordBtn.setText(\"re-password field is empty!\")\n # else:\n # self.badPasswordField()\n # self.ui.passwordBtn.setText(value)\n # else:\n # self.badPasswordField()\n # self.ui.passwordBtn.setText(\"password field is empty!\")\n\n\n\n\n def switchSignupWidget(self):\n self.ui.stackedWidget.setCurrentWidget(self.ui.page_3)\n self.ui.label_17.setText(\"\")\n threading.Thread(target=self.newFunc, daemon=True).start()\n\n\n def passwordLengthValidation(self, password):\n if len(password) <= 4:\n return \"Make sure your password is at least 5 letters\"\n else:\n return \"perfect\"\n\n def passwordNumberValidation(self, password):\n if re.search('[0-9]', password) is None:\n return \"Make sure your password has a number in it\"\n else:\n return \"perfect\"\n\n def passwordCapitalValidation(self, password):\n if re.search('[A-Z]', password) is None:\n return \"Make sure your password has a capital letter in it\"\n else:\n return \"perfect\"\n\n\n def goodUsernameField(self):\n self.ui.UsernameImg.setPixmap(QtGui.QPixmap(\"./images/greentick.png\"))\n\n def badUsernameField(self):\n self.ui.UsernameImg.setPixmap(QtGui.QPixmap(\"./images/wrongcross.png\"))\n\n def goodEmailField(self):\n self.ui.emailImg.setPixmap(QtGui.QPixmap(\"./images/greentick.png\"))\n\n def badEmailField(self):\n self.ui.emailImg.setPixmap(QtGui.QPixmap(\"./images/wrongcross.png\"))\n\n def goodPasswordLengthField(self):\n self.ui.passwordImg.setPixmap(QtGui.QPixmap(\"./images/greentick.png\"))\n\n def badPasswordLengthField(self):\n self.ui.passwordImg.setPixmap(QtGui.QPixmap(\"./images/wrongcross.png\"))\n\n def goodPasswordCapitalField(self):\n self.ui.passwordImg_2.setPixmap(QtGui.QPixmap(\"./images/greentick.png\"))\n\n def badPasswordCapitalField(self):\n self.ui.passwordImg_2.setPixmap(QtGui.QPixmap(\"./images/wrongcross.png\"))\n\n def goodPasswordNumberField(self):\n self.ui.passwordImg_3.setPixmap(QtGui.QPixmap(\"./images/greentick.png\"))\n\n def badPasswordNumberField(self):\n self.ui.passwordImg_3.setPixmap(QtGui.QPixmap(\"./images/wrongcross.png\"))\n\n def goodPasswordSameField(self):\n self.ui.passwordImg_4.setPixmap(QtGui.QPixmap(\"./images/greentick.png\"))\n\n def badPasswordSameField(self):\n self.ui.passwordImg_4.setPixmap(QtGui.QPixmap(\"./images/wrongcross.png\"))\n\n def signup(self):\n global firebase\n global flag\n result = firebase.get('/users',None)\n firstname = self.ui.firstnamelabel.text()\n surname = self.ui.surnamelabel.text()\n email = self.ui.emaillabel.text()\n username = self.ui.usernamelabel.text()\n password = self.ui.passwordlabel.text()\n repassword = self.ui.passwordlabel2.text()\n\n #Quickly check which button is checked\n\n\n if \"Perfect\" in self.ui.emailBtn.text() and \"Perfect\" in self.ui.passwordBtn.text() and \"Perfect\" in self.ui.usernameBtn.text() and \"Perfect\" in self.ui.passwordBtn_2.text() and \"Perfect\" in self.ui.passwordBtn_3.text() and \"Perfect\" in self.ui.passwordBtn_4.text():\n flag = False\n\n user = User(firstname,surname, email, username, \"Not specified\", 5, password,[])\n result = firebase.post('/users',user.__dict__)\n\n self.ui.stackedWidget.setCurrentWidget(self.ui.login_page)\n\n else:\n print(\"fix errors\")\n\n\n\n\n\n #LETS THE USER Login\n def login(self):\n global answer\n global firebase\n global current_user\n global userID\n result = firebase.get('/users',None)\n username = self.ui.lineEdit.text()\n password = self.ui.lineEdit_2.text()\n\n userID = next((item for item in result if 
result[item][\"username\"] == username), None) #Get user ID\n answer = next((item for item in result.values() if item[\"username\"] == username), None) #Get user DATA\n\n if answer is None:\n self.ui.label_17.setText(\"User doesn't exist\")\n else:\n if answer[\"password\"] == password:\n current_user = answer[\"username\"]\n #firebase.post('/users/'+ userID + '/imagePaths','C:/Users/abdsaam/Desktop/pythonapp/designs/images/greentick.png')\n self.ui.stackedWidget.setCurrentIndex(0)\n\n qApp.processEvents()\n if \"imagePaths\" in answer:\n for x in answer[\"imagePaths\"].values():\n print(x)\n imageBlob = bucket.blob(current_user + \"/\" + x) #Get the users folder from database, and their images\n print(imageBlob.public_url)\n with urllib.request.urlopen(imageBlob.public_url) as url:\n s = url.read()\n wid = test_widget()\n wid.setImage(s)\n values = x.split()\n\n\n wid.setLabelDate(str(\"Date: \" + values[0]))\n wid.setLabelTime(str(\"Time: \" + values[1].replace(\"-\", \":\")))\n wid.setLabelType(str(\"Detection: \" + values[2].replace(\".jpg\",\"\")))\n widgetItem = QtWidgets.QListWidgetItem(self.ui.listWidget_2)\n widgetItem.setSizeHint(wid.sizeHint())\n self.ui.listWidget_2.addItem(widgetItem)\n self.ui.listWidget_2.setItemWidget(widgetItem,wid)\n\n\n #imageBlob.upload_from_filename('C:/Users/abdsaam/Desktop/pythonapp/designs/images/greentick.png')\n\n # imageBlob = bucket.blob(self.ui.lineEdit.text()+\"/\")\n #print(imageBlob)\n else:\n self.ui.label_17.setText(\"Incorrect password\")\n\n\n def DetectImage(self):\n self.ui.label_21.setText(\"Loading.... Application might freeze....\")\n qApp.processEvents()\n global f1, fname\n\n\n image = cv2.imread(fname[0]) #get filename\n image_expanded = np.expand_dims(image, axis=0) #convert to numpy array\n\n #Saves model output into 4 values\n (boxes, scores, classes, num) = sess.run([detection_boxes, detection_scores, detection_classes, num_detections],\n feed_dict={image_tensor: image_expanded}) #uses model that was loaded in and feeds it a np array of the image we wish to have detected\n\n #Detect image\n vis_util.visualize_boxes_and_labels_on_image_array(\n image,\n np.squeeze(boxes),\n np.squeeze(classes).astype(np.int32),\n np.squeeze(scores),\n {1: {'name': 'smoker', 'id': 1}},\n use_normalized_coordinates=True,\n line_thickness=8,\n min_score_thresh=0.90)\n\n\n\n # selected_indices = tf.image.non_max_suppression(boxes[0], scores[0], 5, 0.5)\n # print(scores[0])\n # print(selected_indices)\n # print(selected_indices.get_shape().as_list())\n # selected_boxes = tf.gather(boxes, selected_indices)\n # print(selected_boxes)\n # print(selected_boxes.get_shape().as_list())\n # threshold = 0.5\n # height, width = image.shape[:2] #gets height and width of image\n # for x in selected_indices.get_shape().as_list():\n # if x is None:\n # ymin = boxes[0][0][0]*height\n # xmin = boxes[0][0][1]*width\n # ymax = boxes[0][0][2]*height\n # xmax = boxes[0][0][3]*width\n # else:\n # ymin = boxes[0][x][0]*height\n # xmin = boxes[0][x][1]*width\n # ymax = boxes[0][x][2]*height\n # xmax = boxes[0][x][3]*width\n # cv2.rectangle(image, (int(xmin),int(ymin)), (int(xmax),int(ymax)),(0,255,0),5)\n\n #Get the date time and save it\n datetimevalue = strftime(\"%d-%m-%Y %H-%M-%S\", gmtime())\n cv2.imwrite(f1 + \"/\" + datetimevalue + \" \" + \"image\" + \".jpg\", image)\n pixmap = QtGui.QPixmap(f1 + \"/\" + datetimevalue + \" \" + \"image\" + \".jpg\")\n scaled_pixmap = pixmap.scaled(500,500,QtCore.Qt.KeepAspectRatio)\n\n #Post the image to the database with the date 
and time\n self.ui.label_11.setPixmap(QtGui.QPixmap(scaled_pixmap))\n if self.ui.saveNotificationsImage.isChecked():\n value = datetimevalue + \" \" + \"image\" + \".jpg\"\n firebase.post('/users/'+ userID + '/imagePaths', value)\n imageBlob = bucket.blob(current_user + \"/\" + value)\n path = f1 + \"/\" + datetimevalue + \" \" + \"image\" + \".jpg\"\n imageBlob.upload_from_filename(path)\n self.newNotification(f1 + \"/\" + value, value)\n\n self.ui.label_21.setText(\"Done!\")\n\n def newNotification(self, path, s):\n wid = test_widget()\n wid.setImage1(path)\n values = s.split()\n wid.setLabelDate(str(\"Date: \" + values[0]))\n wid.setLabelTime(str(\"Time: \" + values[1].replace(\"-\", \":\")))\n wid.setLabelType(str(\"Detection: \" + values[2].replace(\".jpg\",\"\")))\n widgetItem = QtWidgets.QListWidgetItem(self.ui.listWidget_2)\n widgetItem.setSizeHint(wid.sizeHint())\n self.ui.listWidget_2.insertItem(0, widgetItem)\n self.ui.listWidget_2.setItemWidget(widgetItem,wid)\n\n def outputFile(self):\n global f1\n f1 = QFileDialog.getExistingDirectory(None, \"Select Output Folder\")\n self.ui.pathLabel.setText(f1)\n\n def openFile(self):\n global fname\n fname = QFileDialog.getOpenFileName(None,'Open file','c:\\\\','Image files (*.jpg *.png)')\n pixmap = QtGui.QPixmap(fname[0])\n scaled_pixmap = pixmap.scaled(500,500,QtCore.Qt.KeepAspectRatio)\n self.ui.label_11.setPixmap(QtGui.QPixmap(scaled_pixmap))\n self.ui.fileLabel.setText(fname[0])\n\n def openVideoFile(self):\n global fvideoname\n fvideoname = QFileDialog.getOpenFileName(None,'Open file','c:\\\\','Video files (*.mp4 *.mov)')\n self.ui.fileLabel_2.setText(fvideoname[0])\n\n def detectVideo(self):\n global fvideoname\n self.ui.errorvideo.setText(\"Loading.... Application might freeze....\")\n qApp.processEvents()\n video = cv2.VideoCapture(fvideoname[0])\n if video.isOpened():\n self.ui.errorvideo.setText(\"Done!\")\n current_time = time.time()\n while(video.isOpened()):\n #Get frame and expand it so it can fed to tensorflow\n ret, frame = video.read()\n\n frame_expanded = np.expand_dims(frame, axis=0)\n # Perform the actual detection by running the model with the image as input\n (boxes, scores, classes, num) = sess.run(\n [detection_boxes, detection_scores, detection_classes, num_detections],\n feed_dict={image_tensor: frame_expanded})\n\n #Draws bounding box around the detected part of the image #Helper code\n vis_util.visualize_boxes_and_labels_on_image_array(\n frame,\n np.squeeze(boxes),\n np.squeeze(classes).astype(np.int32),\n np.squeeze(scores),\n {1: {'name': 'smoker', 'id': 1}},\n use_normalized_coordinates=True,\n line_thickness=8,\n min_score_thresh=0.90)\n\n #Show the detected image on the window\n cv2.imshow(\"video\", frame)\n\n if scores[0][0]*100 > 90:\n if self.thirty_timer(current_time):\n current_time = time.time()\n datetimevalue = strftime(\"%d-%m-%Y %H-%M-%S\", gmtime())\n cv2.imwrite(\"./videoimages\" + \"/\" + datetimevalue + \" \" + \"video\" + \".jpg\", frame)\n if self.ui.saveNotificationsImage_3.isChecked():\n value = datetimevalue + \" \" + \"video\" + \".jpg\"\n firebase.post('/users/'+ userID + '/imagePaths', value)\n imageBlob = bucket.blob(current_user + \"/\" + value)\n path = \"./videoimages\" + \"/\" + datetimevalue + \" \" + \"video\" + \".jpg\"\n imageBlob.upload_from_filename(path)\n self.newNotification(\"./videoimages\" + \"/\" + value, value)\n\n\n\n #Q or Esc to quit detecting webcam\n k = cv2.waitKey(1)\n if k == ord('q') or k == 27:\n break\n if cv2.getWindowProperty(\"video\",1) == -1:\n 
break\n\n # Clean up\n video.release()\n cv2.destroyAllWindows()\n else:\n self.ui.errorvideo.setText(\"Error with video file\")\n\n\n def thirty_timer(self,oldtime):\n return time.time() - oldtime >= 30\n\n def detectCameraIP(self):\n\n self.ui.label_9.setText(\"Loading.... Application might freeze....\")\n qApp.processEvents()\n #Check if user is connecting to USB webcam or trying to connect to cameraIP address\n if self.ui.cameraIP.text().isdigit():\n cameraIPvalue = int(self.ui.cameraIP.text())\n else:\n cameraIPvalue = self.ui.cameraIP.text()\n\n video = cv2.VideoCapture(cameraIPvalue)\n video.set(3,1024)\n video.set(4,768)\n\n if video.isOpened():\n self.ui.label_9.setText(\"Done!\")\n current_time = time.time()\n while(True):\n #Get frame and expand it so it can fed to tensorflow\n ret, frame = video.read()\n\n frame_expanded = np.expand_dims(frame, axis=0)\n # Perform the actual detection by running the model with the image as input\n (boxes, scores, classes, num) = sess.run(\n [detection_boxes, detection_scores, detection_classes, num_detections],\n feed_dict={image_tensor: frame_expanded})\n\n #Draws bounding box around the detected part of the image #Helper code\n vis_util.visualize_boxes_and_labels_on_image_array(\n frame,\n np.squeeze(boxes),\n np.squeeze(classes).astype(np.int32),\n np.squeeze(scores),\n {1: {'name': 'smoker', 'id': 1}},\n use_normalized_coordinates=True,\n line_thickness=8,\n min_score_thresh=0.90)\n\n #Show the detected image on the window\n cv2.imshow(self.ui.locationCamera.text(), frame)\n\n if scores[0][0]*100 > 90:\n if self.thirty_timer(current_time):\n current_time = time.time()\n datetimevalue = strftime(\"%d-%m-%Y %H-%M-%S\", gmtime())\n cv2.imwrite(\"./webcamimages\" + \"/\" + datetimevalue + \" \" + \"webcam\" + \".jpg\", frame)\n if self.ui.saveNotificationsImage_2.isChecked():\n value = datetimevalue + \" \" + \"webcam\" + \".jpg\"\n firebase.post('/users/'+ userID + '/imagePaths', value)\n imageBlob = bucket.blob(current_user + \"/\" + value)\n path = \"./webcamimages\" + \"/\" + datetimevalue + \" \" + \"webcam\" + \".jpg\"\n imageBlob.upload_from_filename(path)\n self.newNotification(\"./webcamimages\" + \"/\" + value, value)\n\n\n\n #Q or Esc to quit detecting webcam\n k = cv2.waitKey(1)\n if k == ord('q') or k == 27:\n break\n if cv2.getWindowProperty(self.ui.locationCamera.text(),1) == -1:\n break\n\n # Clean up\n video.release()\n cv2.destroyAllWindows()\n else:\n self.ui.label_9.setText(\"Cannot find camera\")\n\n\nif __name__ == '__main__':\n import sys\n app = QtWidgets.QApplication(sys.argv)\n window = MainWindow()\n window.show()\n sys.exit(app.exec_())\nexit(app.exec_())\n" }, { "alpha_fraction": 0.787401556968689, "alphanum_fraction": 0.7960630059242249, "avg_line_length": 49.720001220703125, "blob_id": "80fdfb67b4380dbea218eded01482ec08d46bd12", "content_id": "d5f943e2e5efc20dd3d1ed3f5aa37f2cc134ddb4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 1270, "license_type": "no_license", "max_line_length": 155, "num_lines": 25, "path": "/designs/README.txt", "repo_name": "viathus/Image-object-detection-application", "src_encoding": "UTF-8", "text": "APPLICATION IS NOT INTENDED FOR DEVICES USING TENSORFLOW CPU, \nRECOMMENDED TO HAVE GPU(NVIDIA) IN ORDER TO RUN THE APPLICATION WITHOUT PROBLEMS. 
\n\n\nCreating an executable file wasn't possible.Therefore,\nyou have to create a virtual environment with python version 3.5\n\nI will list the steps assuming you have python and anaconda installed on your device (TENSORFLOW CPU):\n\n1. Open command prompt \n2. Conda create -n environment_name python=3.5\t#Creates virtual environment with python version needed \n3. activate environment_name\t\t\t#Activates virtual environment\n4. pip install -r requirements.txt \t\t#Contains all packages needed to run the applicatio, only has to be done once \n5. python implem.py \t\t\t\t#Runs the application\n\n\nThese are the steps to installing tensorflow CPU however this will cause the application to be extremely slow and the application might not work at times. \nDownloading TENSORFLOW-GPU instead will get rid of these problems. The application was created with the intentions of being utilised by the GPU not CPU.\n\n\nThis video will help you install the required extra packages for tensorflow GPU: https://www.youtube.com/watch?v=KZFn0dvPZUQ\n\nIf the video has been followed correctly, \n\nreplace step 4 with pip install -r requirements-gpu.txt\n\n\n" } ]
8
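All three detection entry points in implem.py above (DetectImage, detectVideo, detectCameraIP) share one loop shape: read a frame, run the frozen TensorFlow graph, draw boxes, and save/upload at most once per 30-second window. A minimal sketch of that pattern, assuming a run_detector callable that stands in for the sess.run call (the callable, window title, and file naming here are illustrative, not from the repo):

import time
import cv2

def capture_loop(source, run_detector, min_score=0.90, cooldown_s=30):
    video = cv2.VideoCapture(source)
    if not video.isOpened():
        raise RuntimeError("cannot open video source")
    last_saved = 0.0
    while True:
        ret, frame = video.read()
        if not ret:  # end of file or a dropped camera frame
            break
        score = run_detector(frame)  # hypothetical wrapper around sess.run(...)
        cv2.imshow("video", frame)
        # save at most one frame per cooldown window for confident detections
        if score >= min_score and time.time() - last_saved >= cooldown_s:
            last_saved = time.time()
            cv2.imwrite(time.strftime("%d-%m-%Y %H-%M-%S") + " frame.jpg", frame)
        k = cv2.waitKey(1)
        if k == ord('q') or k == 27:  # Q or Esc quits
            break
    video.release()
    cv2.destroyAllWindows()

Checking ret before using the frame is what keeps the loop from crashing when a video ends or a camera drops out.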
upworka0/scraper_DotNET_website
https://github.com/upworka0/scraper_DotNET_website
ddafd8194f460fb73fc6d4f5660206fe36d6f739
ebd7ab8c7ee167c376cc46f0e254f3acf49ee55a
a22eafc5ea59311a57215f252b3f5848468bb547
refs/heads/master
2020-04-17T08:01:25.340652
2019-01-18T11:27:04
2019-01-18T11:27:04
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6847826242446899, "alphanum_fraction": 0.717391312122345, "avg_line_length": 22.25, "blob_id": "1645cefa22985c31b6316509d03987a49334f477", "content_id": "9a19cb4c204ddaad2682147af6105a6ad5095da1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 92, "license_type": "no_license", "max_line_length": 33, "num_lines": 4, "path": "/README.md", "repo_name": "upworka0/scraper_DotNET_website", "src_encoding": "UTF-8", "text": "# Python3 CLI scraper\nPython3 CLI to scraper \n<br><br><br>\npython3 app.py --csv <output.csv>" }, { "alpha_fraction": 0.5952955484390259, "alphanum_fraction": 0.6158021688461304, "avg_line_length": 34.52857208251953, "blob_id": "5df435f91f3dde0f5eb4cdb53e078b9bda16fc7a", "content_id": "ab4087483f359ca980b21e81063c725b33c92691", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4974, "license_type": "no_license", "max_line_length": 139, "num_lines": 140, "path": "/app.py", "repo_name": "upworka0/scraper_DotNET_website", "src_encoding": "UTF-8", "text": "import requests\nfrom bs4 import BeautifulSoup\nimport csv\nfrom pandas.io.excel import ExcelWriter\nimport pandas\n\n# base url\nURL = \"https://columbusrealtors.com/find.aspx?mode=browse&letter=\"\n# define session\nsession = requests.session()\n# define header of request\nheader = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'}\n# define file names\ncsv_file = \"result.csv\"\nxls_file = \"result.xlsx\"\n# define the pageNumber range when page size is 35\nmaxPagenumber = 235\ncnt = 0\n\n\n# define functions\n\n# getValue\n## param soup : beautiful soup Object:\n## param eleName : element Name\n## param dict : query for element\n## return mixed\ndef getValue(soup, eleName, dict):\n try:\n ele = soup.find(eleName, dict)\n return ele.get('value')\n except:\n return \"\"\n pass\n# savetoCSV\ndef writeToFile(data, isHeader=False):\n if isHeader:\n myFile = open(csv_file, 'w', newline='')\n else:\n myFile = open(csv_file, 'a', newline='')\n with myFile:\n writer = csv.writer(myFile)\n writer.writerow(data)\n myFile.close()\n\n# step 1\n# get html content for get hidden values\nres = session.get(URL)\nsoup = BeautifulSoup(res.text, \"html.parser\")\n__VIEWSTATE = getValue(soup, \"input\", {'id':\"__VIEWSTATE\"})\n__VIEWSTATEGENERATOR = getValue(soup, \"input\", {\"id\" : \"__VIEWSTATEGENERATOR\"})\n__EVENTVALIDATION = getValue(soup, \"input\", {\"id\" : \"__EVENTVALIDATION\"})\n\n\ndef getPageData(pageNumber):\n global __VIEWSTATE, __VIEWSTATEGENERATOR, __EVENTVALIDATION, header, cnt\n if pageNumber < 3:\n __EVENTTARGET = \"ctl00$body$primary_body_1$ctl01$ucSearchResults$radSearchResults$ctl00$ctl02$ctl00$ctl0\" + str(5 + pageNumber * 2)\n else:\n __EVENTTARGET = \"ctl00$body$primary_body_1$ctl01$ucSearchResults$radSearchResults$ctl00$ctl02$ctl00$ctl\" + str(5 + pageNumber* 2)\n data = {\n \"__EVENTTARGET\": __EVENTTARGET,\n \"__VIEWSTATE\": __VIEWSTATE,\n \"__VIEWSTATEGENERATOR\": __VIEWSTATEGENERATOR,\n \"__EVENTVALIDATION\": __EVENTVALIDATION,\n }\n\n # get page data of list\n res = session.post(URL, data=data,headers=header)\n soup1 = BeautifulSoup(res.text, \"html.parser\")\n __VIEWSTATE = getValue(soup1, \"input\", {'id': \"__VIEWSTATE\"})\n __VIEWSTATEGENERATOR = getValue(soup1, \"input\", {\"id\": \"__VIEWSTATEGENERATOR\"})\n __EVENTVALIDATION = getValue(soup1, \"input\", {\"id\": 
\"__EVENTVALIDATION\"})\n\n data = {\n \"__EVENTTARGET\": \"\",\n \"__VIEWSTATE\": __VIEWSTATE,\n \"__VIEWSTATEGENERATOR\": __VIEWSTATEGENERATOR,\n \"__EVENTVALIDATION\": __EVENTVALIDATION,\n }\n\n dataTable = soup1.find(\"table\", {\"id\" : \"ctl00_body_primary_body_1_ctl01_ucSearchResults_radSearchResults_ctl00\"}).find_all(\"tbody\")[2]\n for tr in dataTable.find_all('tr'):\n # first name, last name, company\n lastName = tr.find_all(\"td\")[1].text\n firstName= tr.find_all(\"td\")[2].text\n company = tr.find_all(\"td\")[3].text\n city = tr.find_all(\"td\")[4].text\n type = tr.find_all(\"td\")[5].text.replace('\\r\\n\\t\\t\\t\\t\\t\\t\\t\\t','').replace('\\r\\n\\t\\t\\t\\t\\t\\t\\t','')\n\n\n # get other data (address, Phone, Fax, Email Address) from detail page\n href = tr.find('a').get('href')\n data.update({\"__EVENTTARGET\": href.replace(\"javascript:__doPostBack('\", \"\").replace(\"','')\", \"\")})\n # get detail data\n res1 = session.post(URL, data=data, headers=header)\n soup2 = BeautifulSoup(res1.text, \"html.parser\")\n dataDiv = soup2.find_all(\"div\", {\"class\": \"island\"})[1]\n ptags = dataDiv.find_all(\"p\")\n #address\n try:\n text = ptags[0].text\n address = text.replace('\\r\\n\\t\\t\\t\\t','').replace(company, '').replace('\\r\\n\\t\\t\\t','').replace('\\n','')\n except:\n address = \"\"\n pass\n #phone\n phone = ptags[1].text.replace('Phone:','').replace('\\n ','')\n #fax\n fax = ptags[2].text.replace('Fax:','').replace('\\n ','')\n #email\n email = ptags[3].text.replace('Email:','').replace('\\n ','')\n dt = [lastName, firstName, company, city, type, address, phone, fax, email]\n cnt += 1\n print(cnt)\n # save one data to csv\n writeToFile(dt)\n\ncsvHeader = [\"Last Name\", \"First Name\", \"Company\", \"City\", \"Type\", \"Address\", \"Phone\", \"Fax\", \"Email Address\"]\nwriteToFile(csvHeader, True)\n\npageNumber = 0\nisFirst = True\nfor number in range(0, maxPagenumber):\n if isFirst == True and pageNumber <= 10:\n pageNumber = pageNumber\n elif isFirst == False and pageNumber <= 11:\n pageNumber = pageNumber\n else:\n isFirst = False\n pageNumber = pageNumber % 10\n print(\"pageNumber\" + str(pageNumber))\n getPageData(pageNumber)\n pageNumber += 1\n\n# convert csv file to excel format\nwith ExcelWriter(xls_file) as ew:\n df = pandas.read_csv(csv_file)\n df.to_excel(ew, sheet_name=\"sheet1\", index=False)\n" } ]
2
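app.py above drives an ASP.NET WebForms pager, which only works if every POST echoes back the server's hidden state fields. A minimal sketch of that round-trip, assuming the standard WebForms field names (the helper names are illustrative, not from the repo):

import requests
from bs4 import BeautifulSoup

HIDDEN = ("__VIEWSTATE", "__VIEWSTATEGENERATOR", "__EVENTVALIDATION")

def read_state(html):
    # pull the hidden state inputs out of the last response
    soup = BeautifulSoup(html, "html.parser")
    return {name: (soup.find("input", {"id": name}) or {}).get("value", "")
            for name in HIDDEN}

def postback(session, url, state, event_target):
    # send the state back along with the control that was "clicked"
    data = dict(state, __EVENTTARGET=event_target)
    res = session.post(url, data=data)
    res.raise_for_status()
    return res.text

Re-reading the state from each response before the next post is essential, since stale __EVENTVALIDATION values are rejected; that is why getPageData re-extracts all three fields from soup1.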
takashiAg/easy_mongo_python
https://github.com/takashiAg/easy_mongo_python
194db859c2947e274dda651b3e44195c472918c7
9d0c0dc55c466b72dee8c0b211aee2454b8872a9
088b919582e5063b454a9855342333bf331955bc
refs/heads/master
2021-01-16T11:17:43.339594
2017-08-11T07:32:06
2017-08-11T07:32:06
99,998,789
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5785714387893677, "alphanum_fraction": 0.6357142925262451, "avg_line_length": 19.14285659790039, "blob_id": "1c73d5fa6658745256ef4cb39cc6ad64ef1e6221", "content_id": "df20fd472b4a36aff5c74aee45bf55b88c9c51e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 140, "license_type": "no_license", "max_line_length": 34, "num_lines": 7, "path": "/easy_mongo/__init__.py", "repo_name": "takashiAg/easy_mongo_python", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# coding: utf-8\nfrom easy_mongo.mongo import Mongo\n\n__author__ = 'crawd4274'\n__version__ = '0.0.1'\n__license__ = 'MIT'" }, { "alpha_fraction": 0.6529284119606018, "alphanum_fraction": 0.6550976037979126, "avg_line_length": 26.176469802856445, "blob_id": "1c568a4916ebca1a6b68679020ec961aafc1f8f1", "content_id": "0cddccf8986cc41095f23bb62a852c6d93f9ccaf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 461, "license_type": "no_license", "max_line_length": 59, "num_lines": 17, "path": "/setup.py", "repo_name": "takashiAg/easy_mongo_python", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# coding: utf-8\nfrom setuptools import setup, find_packages\nfrom easy_mongo import __author__, __version__, __license__\n\nsetup(\n name='easy_mongo',\n version=__version__,\n description='easy mongodb',\n license=__license__,\n author=__author__,\n author_email='crawd4274@gmail.com',\n url='https://github.com/takashiAg/easy_mongo_python',\n keywords='sample pip github python',\n packages=find_packages(),\n install_requires=[],\n)" }, { "alpha_fraction": 0.6226765513420105, "alphanum_fraction": 0.6276332139968872, "avg_line_length": 26.3389835357666, "blob_id": "16e0a998ed0868467cd6c543d5b88e755b82e750", "content_id": "9f0e75bd5d86bcef00f80908e995f264e6cc253b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1614, "license_type": "no_license", "max_line_length": 88, "num_lines": 59, "path": "/easy_mongo/mongo.py", "repo_name": "takashiAg/easy_mongo_python", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# coding: utf-8\n\nimport pymongo\nfrom datetime import datetime,timedelta\nimport dateutil.parser\nimport sys\n\nclass Mongo:\n\n host=\"localhost\"\n port=27017\n database_name=\"test\"\n collection_name=\"test\"\n\n def __init__(self):\n self.client=pymongo.MongoClient(self.host, self.port)\n self.change_database()\n\n def change_database(self):\n exec(\"self.collection=self.client.\"+self.database_name+\".\"+self.collection_name)\n\n def insert(self,data):\n data[\"date\"]=datetime.now()\n self.collection.insert_one(data)\n\n def view(self):\n return self.collection.find()\n\n def view_and(self,data):\n return self.collection.find({'$and':data})\n\n def view_or(self,data):\n return self.collection.find({'$or':data})\n\n def count_oneweek(self):\n return self.count_date(datetime.now()+timedelta(weeks=-1),datetime.now())\n\n def count_date(self,startdate,stopdate):\n return self.count_date_with_condition(startdate,stopdate,{})\n\n def count_date_with_condition(self,startdate,stopdate,condition):\n date={\n \"$gte\" : self.convert_to_date(startdate),\n \"$lte\" : self.convert_to_date(stopdate)\n }\n condition.update({'date':date})\n return self.collection.find(condition).count()\n\n def convert_to_date(self,time):\n if isinstance(time,str):\n return dateutil.parser.parse(time)\n elif isinstance(time,datetime):\n return time\n 
else:\n            # reject values that are neither str nor datetime\n            raise TypeError(\"time must be a str or a datetime\")\n\n    def disconnect(self):\n        # pymongo collections have no disconnect(); close the client connection instead\n        self.client.close()\n" } ]
3
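easy_mongo ships no usage example; a hypothetical sketch of how the Mongo class above is meant to be driven (the subclass, field names, and the local mongod are assumptions, not from the repo):

from easy_mongo import Mongo

class EventLog(Mongo):
    # connection settings are plain class attributes read by __init__
    host = "localhost"
    port = 27017
    database_name = "myapp"
    collection_name = "events"

log = EventLog()  # connects and selects myapp.events immediately
log.insert({"user": "alice", "action": "login"})  # insert() stamps a "date" field
print(log.count_oneweek())  # documents dated within the last 7 days

Overriding the class attributes before instantiation matters because __init__ connects right away using whatever host, port, and names are in scope.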
tomiju/ISS-QueryByExample
https://github.com/tomiju/ISS-QueryByExample
9291f590121800ee5219f203a7bea33e6d8be71a
a33714b9de80d01e719f9f7cc30b50717d1efba9
66552bd0eb040cf18aa528e671e83f004d7db815
refs/heads/master
2022-11-21T23:46:03.740539
2020-07-25T12:39:56
2020-07-25T12:39:56
281,182,371
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.54078608751297, "alphanum_fraction": 0.5797182321548462, "avg_line_length": 28.371583938598633, "blob_id": "d46943f149a68baf03861e759301b3ca58a8016b", "content_id": "3f7e4a8eeee07cd15611d1f9f0e11f44d83ad650", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5458, "license_type": "no_license", "max_line_length": 113, "num_lines": 183, "path": "/src/program.py", "repo_name": "tomiju/ISS-QueryByExample", "src_encoding": "UTF-8", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nimport soundfile as sf\nfrom scipy.signal import spectrogram, lfilter, freqz, tf2zpk\nimport scipy\nimport scipy.stats\nfrom scipy import stats\nfrom pydub import AudioSegment\nimport sys\n\nnp.set_printoptions(threshold=sys.maxsize)\n\n#------------------------------------------------------------------\n\n# Funkce pro načtení vstupního .wav souboru\ndef LoadInput(inputFile):\n s, fs = sf.read(inputFile)\n t = np.arange(s.size) / fs\n \n return s, fs, t\n\n# Funkce pro výpočet spektrogramu\ndef Spectrogram(fs, s):\n \n wlen = 25 * fs // 1000\n wshift = 10 * fs // 1000\n woverlap = wlen - wshift\n nperseg = 25 * fs // 1000 \n f, t, sgr = spectrogram(s, fs, noverlap=woverlap, nperseg=nperseg, nfft=511)\n \n sgr_log = 10 * np.log10(sgr+1e-20) # matice \"X\"\n \n return sgr_log, f, t\n\n# Funkce pro přípravu pomocné matice a následného vypočítání hodnot parametrů\ndef CalculateParameters(inputMatrix):\n\n jednickovaM = np.zeros((16,len(inputMatrix)))\n \n for radek in range(16):\n for sloupec in range(radek*16, radek*16+16):\n jednickovaM[radek][sloupec] = 1\n \n \n F = np.dot(jednickovaM, inputMatrix)\n \n return F\n \n# Pomocná funkce pro výpočet hodnoty skóre \ndef QueryPom(F, Q, pp):\n \n pom = 0 \n \n for i in range(Q[0].shape[0]):\n pom_korelace, p_value = stats.pearsonr(Q[:,i], F[:,i + pp])\n pom += pom_korelace\n \n return pom / Q[0].shape[0]\n\n# Funkce pro výpočet hodnoty skóre (korelace)\ndef QueryScore(F, Q):\n\n F_length = F[0].shape[0]\n Q_length = Q[0].shape[0]\n\n max_size = F_length - Q_length\n correlations_vector = np.zeros(F_length)\n \n for i in range(max_size):\n pom_korelace = QueryPom(F, Q, i)\n correlations_vector[i] = pom_korelace\n\n return correlations_vector\n\n#------------------------------------------------------------------\n\ninputFileS = '../sentences/sx136.wav' # input file\ninputF = 'sx136.wav' # input file jméno pro vykreslení\ninputFileQ1 = '../queries/q1.wav'\ninputFileQ2 = '../queries/q2.wav'\n\n\n# Načtení vstupních .wav souborů\ns, fs, t = LoadInput(inputFileS)\n\n#--------------------------------------------------\n# Kreslení grafů\nplt.figure(figsize=(12,7))\nplt.subplot(311)\nplt.subplots_adjust(left=None, bottom=-0.7, right=None, top=0.9, wspace=None, hspace=None)\nplt.gca().set_xlabel('$t$')\nplt.gca().set_ylabel('signal')\nplt.gca().set_title('\"scientific\" and \"development\" vs \"'+inputF +'\"', fontweight=\"bold\")\nplt.plot(t, s)\nplt.margins(0)\n#--------------------------------------------------\n\n# Načtení vstupních .wav souborů\ns1, fs1, t1 = LoadInput(inputFileQ1)\ns2, fs2, t2 = LoadInput(inputFileQ2)\n\n# Spektrogram\nF_input, f11, t11 = Spectrogram(fs, s)\n\n\"\"\"#--------------------------------------------------\n# Kreslení grafů\nplt.subplot(312)\nplt.pcolormesh(t11,f11/1000,F_input)\nplt.gca().set_xlabel('$t[s]$')\nplt.gca().set_ylabel('features')\nplt.margins(0)\n#--------------------------------------------------\"\"\"\n\n# Spektrogram\nQ1_input, f_null, t_null = 
Spectrogram(fs1, s1)\nQ2_input, f_null, t_null = Spectrogram(fs2, s2)\n\n# Compute the feature values\nF = CalculateParameters(F_input)\nQ1 = CalculateParameters(Q1_input)\nQ2 = CalculateParameters(Q2_input)\n\n#--------------------------------------------------\n# Plotting\nimport matplotlib.ticker as ticker\nplt.subplot(312)\nplt.pcolormesh(F)\nticks_x = ticker.FuncFormatter(lambda x, pos: '{0:g}'.format(x/100))\nplt.gca().xaxis.set_major_formatter(ticks_x)\nplt.gca().invert_yaxis()\nplt.gca().set_xlabel('$t$')\nplt.gca().set_ylabel('features')\nplt.margins(0)\n#--------------------------------------------------\n\n# Compute the score (correlation) of each query against the input sentence\nscoreQ1 = QueryScore(F, Q1)\nscoreQ2 = QueryScore(F, Q2)\n\n#--------------------------------------------------\n# Plotting\nplt.subplot(313)\nt = np.arange(scoreQ2.size) / 100\nplt.plot(t, scoreQ2, label='development')\nt = np.arange(scoreQ1.size) / 100\nplt.plot(t, scoreQ1, label='scientific')\naxes = plt.gca()\naxes.set_ylim([0,1])\naxes.set_ylabel('scores')\nplt.gca().set_xlabel('$t$')\nplt.margins(0)\nplt.legend(loc=1)\nplt.tight_layout()\nplt.show()\n#--------------------------------------------------\n\n# Search for \"hits\"\n\nfor i in range(scoreQ1.size):\n    if scoreQ1[i] >= 0.9:\n        print('hit Q1 = {}'.format(i), ', length is: {}'.format(scoreQ1.size))\n        print('time: {}s'.format(t11[i]))\n        \n        # cut the hit out of the original recording\n        t1 = t11[i]*1000 # conversion to milliseconds\n        t2 = t11[i + Q1_input.shape[1]]*1000 # end of the query window, in milliseconds\n        newAudio = AudioSegment.from_wav(inputFileS)\n        newAudio = newAudio[t1:t2]\n        newAudio.export('../hits/Q1_{}'.format(inputF), format=\"wav\") # Exports to a wav file in the current path.\n        break\n        \nfor i in range(scoreQ2.size):\n    if scoreQ2[i] >= 0.9:\n        print('hit Q2 = {}'.format(i), ', length is: {}'.format(scoreQ2.size))\n        print('time: {}s'.format(t11[i]))\n        \n        # cut the hit out of the original recording\n        t1 = t11[i]*1000 # conversion to milliseconds\n        t2 = t11[i + Q2_input.shape[1]]*1000 # end of the query window, in milliseconds\n        newAudio = AudioSegment.from_wav(inputFileS)\n        newAudio = newAudio[t1:t2]\n        newAudio.export('../hits/Q2_{}'.format(inputF), format=\"wav\") # Exports to a wav file in the current path.\n        break\n" }, { "alpha_fraction": 0.6933638453483582, "alphanum_fraction": 0.7162471413612366, "avg_line_length": 38.727272033691406, "blob_id": "46927fa60036f2511a6ce42ea4c64179ea852353", "content_id": "0b720225975538086ccffa02fc4c2407b7333ab3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 455, "license_type": "no_license", "max_line_length": 120, "num_lines": 11, "path": "/readme.md", "repo_name": "tomiju/ISS-QueryByExample", "src_encoding": "UTF-8", "text": "# ISS project (2019)\n### Grade: 12/12\n## Title: Query by example\n\n### Eng: Simple audio/speech pattern finder in Python - it tries to find a given pattern (query) in a set of sentences.\n### The goal of the project is to build a simple system for searching audio by an acoustic pattern, Query by Example (QbE).\n\n* Solution in **Python** and in Octave\n* Code: ```./src/program.py```\n* Documentation: *xjulin08.pdf*\n* For the assignment, see *projekt-zadání.pdf*\n" } ]
2
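The scoring in program.py above compares, at every alignment p, each 16-dimensional feature column of the query with the corresponding column of the sentence and averages the Pearson correlations over the query's frames. An equivalent vectorized sketch (the eps guard against constant columns is an addition, not in the original):

import numpy as np

def query_score(F, Q, eps=1e-12):
    # F is (16, n), Q is (16, q), as produced by CalculateParameters
    n, q = F.shape[1], Q.shape[1]
    # center and normalize each query column (one 16-band feature vector per frame)
    Qz = Q - Q.mean(axis=0, keepdims=True)
    Qz /= (np.linalg.norm(Qz, axis=0, keepdims=True) + eps)
    scores = np.zeros(n)
    for p in range(n - q):
        W = F[:, p:p + q]
        Wz = W - W.mean(axis=0, keepdims=True)
        Wz /= (np.linalg.norm(Wz, axis=0, keepdims=True) + eps)
        # per-frame Pearson r is the dot product of the normalized columns
        scores[p] = np.mean(np.sum(Qz * Wz, axis=0))
    return scores

On the matrices produced by CalculateParameters this should give the same vector as QueryScore(F, Q), but with one numpy pass per alignment instead of q separate pearsonr calls.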
InvInc/invinc
https://github.com/InvInc/invinc
d6f15aa92f93726c15ab3604fa15603504006e50
13058e355e58062dab9f273c197e13f6fd2d67dc
ae54920f8813d1c01871995e5149cda0734e780d
refs/heads/master
2019-08-08T06:04:41.921634
2015-05-07T15:07:27
2015-05-07T15:07:27
27,091,387
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.3695652186870575, "alphanum_fraction": 0.447826087474823, "avg_line_length": 32, "blob_id": "f6e0775563465f73174508f94ce7e953c972bd67", "content_id": "f4ae010385b60dcf84bb9fb8df5d2e91d8690421", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 230, "license_type": "no_license", "max_line_length": 85, "num_lines": 7, "path": "/incoq/tests/programs/comp/nonpattern_out.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from incoq.runtime import *\nE = Set()\nS = Set()\nfor (v1, v2, z) in {(1, 1, 'a'), (1, 2, 'b'), (1, 3, 'c'), (2, 3, 'd'), (3, 4, 'e')}:\n E.add((v1, v2, z))\nS.add(1)\nprint(sorted({x for (x, x2, c) in E if (x2 in S) if (x == x2)}))" }, { "alpha_fraction": 0.5168207287788391, "alphanum_fraction": 0.5356746912002563, "avg_line_length": 29.393259048461914, "blob_id": "48a5b0ced7bae70cfe69b0b747d9d35d132ab936", "content_id": "1137c9d31b4511d0bda69ee20ef20adaf69a6734", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2705, "license_type": "no_license", "max_line_length": 76, "num_lines": 89, "path": "/incoq/tests/invinc/set/test_mask.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Unit tests for mask.py.\"\"\"\n\n\nimport unittest\n\nimport incoq.compiler.incast as L\nfrom incoq.compiler.central import CentralCase\nfrom incoq.compiler.set.mask import *\n\n\nclass TestMask(CentralCase):\n \n def test_construct(self):\n mask1 = Mask('bubw12u')\n mask2 = Mask.from_vars(['a', 'b', 'c', '_', 'a', 'b', 'z'],\n {'a', 'c'})\n self.assertEqual(mask1, mask2)\n \n mask3 = Mask('bbbu')\n mask4 = Mask.from_keylen(3)\n self.assertEqual(mask3, mask4)\n \n with self.assertRaises(ValueError):\n Mask('b0')\n Mask(['b', '1'])\n with self.assertRaises(ValueError):\n Mask(['b', '0'])\n with self.assertRaises(ValueError):\n Mask('1')\n \n self.assertEqual(Mask('bu'), Mask.OUT)\n \n def test_derived(self):\n mask1 = Mask('bu')\n mask2 = Mask('b1')\n mask3 = Mask('uw')\n \n self.assertEqual(mask1.maskstr, 'out')\n \n self.assertTrue(mask2.is_allbound)\n self.assertTrue(mask3.is_allunbound)\n self.assertTrue(mask3.has_wildcards)\n self.assertTrue(mask2.has_equalities)\n self.assertTrue(mask1.is_mixed)\n \n self.assertTrue(mask1.is_keymask)\n self.assertTrue(mask1.is_lookupmask)\n self.assertEqual(mask1.lookup_arity, 1)\n self.assertFalse(mask2.is_keymask)\n self.assertFalse(mask3.is_keymask)\n \n self.assertEqual(len(mask1), 2)\n \n def test_makenode(self):\n node = Mask('bu').make_node()\n exp_node = L.Str('bu')\n self.assertEqual(node, exp_node)\n \n def test_splitvars(self):\n mask = Mask('bubw12u')\n bvs, uvs, eqs = mask.split_vars(['a', 'b', 'c', 'd', 'e', 'f', 'g'])\n self.assertEqual(bvs, ('a', 'c'))\n self.assertEqual(uvs, ('b', 'g'))\n self.assertEqual(eqs, (('a', 'e'), ('b', 'f')))\n \n def test_breakkey(self):\n parts = Mask.breakkey(Mask('bbbu'), L.pe('(1, 2, 3)'))\n exp_parts = (L.pe('1'), L.pe('2'), L.pe('3'))\n self.assertEqual(parts, exp_parts)\n \n parts = Mask.breakkey(Mask('bu'), L.pe('1'))\n exp_parts = (L.pe('1'),)\n self.assertEqual(parts, exp_parts)\n \n def test_make_delta_mask(self):\n mask = Mask('bubw12u')\n mask = mask.make_delta_mask()\n exp_mask = Mask('bbbw12b')\n self.assertEqual(mask, exp_mask)\n \n def test_make_interkey_mask(self):\n mask = Mask('bubw12u')\n mask = mask.make_interkey_mask(['a', 'b'], ['a'])\n exp_mask = Mask('bwuw12w')\n self.assertEqual(mask, exp_mask)\n\n\nif __name__ == 
'__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.5823460817337036, "alphanum_fraction": 0.5828466415405273, "avg_line_length": 36.45624923706055, "blob_id": "c03709eb79e1543c919de1d18ff83efaedc29d33", "content_id": "1c2240a8be53902d6405e6e515050f9fd62a5e8c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5993, "license_type": "no_license", "max_line_length": 73, "num_lines": 160, "path": "/incoq/compiler/demand/demtrans.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Demand-driven incrementalization transformation.\"\"\"\n\n\n__all__ = [\n 'deminc_relcomp',\n]\n\n\nfrom operator import attrgetter\n\nimport incoq.compiler.incast as L\nfrom incoq.compiler.comp import (make_inccomp, inc_relcomp_helper,\n inc_relcomp, inc_changetrack)\n\nfrom .demclause import DemClause\nfrom .tags import (make_structures, filter_comps,\n structures_to_comps, uset_to_comp)\n\n\nclass OuterDemandMaintainer(L.OuterMaintTransformer):\n \n \"\"\"Insert code after an update to read from a demand delta set\n and call the demand or undemand function for each element.\n \"\"\"\n \n def __init__(self, manager, delta_name, demname, at_rels, projvars,\n outsideinvs):\n super().__init__(outsideinvs)\n self.manager = manager\n self.delta_name = delta_name\n self.demname = demname\n self.at_rels = at_rels\n self.projvars = projvars\n \n def visit_SetUpdate(self, node):\n rel = L.get_name(node.target)\n if rel not in self.at_rels:\n return\n \n # New code gets inserted after the update.\n # This is true even if the update was a removal.\n # It shouldn't matter where we do the U-set update,\n # so long as the invariants are properly maintained\n # at the time.\n \n prefix = self.manager.namegen.next_prefix()\n vars = [prefix + v for v in self.projvars]\n \n if node.op == 'add':\n funcname = L.N.demfunc(self.demname)\n else:\n funcname = L.N.undemfunc(self.demname)\n \n call_func = L.Call(L.ln(funcname),\n tuple(L.ln(v) for v in vars),\n (), None, None)\n postcode = L.pc('''\n for S_PROJVARS in DELTA.elements():\n CALL_FUNC\n DELTA.clear()\n ''', subst={'S_PROJVARS': L.tuplify(vars, lval=True),\n 'DELTA': self.delta_name,\n 'CALL_FUNC': call_func})\n \n return self.with_outer_maint(node, funcname, L.ts(node),\n (), postcode)\n\n\ndef deminc_relcomp(tree, manager, comp, compname):\n \"\"\"Incrementalize a relational comprehension, add appropriate\n incrementalized demand structures, and rewrite the maint comps\n to use these structures.\n \"\"\"\n verbose = manager.options.get_opt('verbose')\n use_tag_checks = manager.options.get_opt('tag_checks')\n factory = manager.factory\n \n force_uset = manager.options.get_queryopt(comp, 'uset_force')\n if force_uset is None:\n force_uset = manager.options.get_opt('default_uset_force')\n \n subdem_tags = manager.options.get_opt('subdem_tags')\n \n reorder = manager.options.get_queryopt(comp, 'demand_reorder')\n \n # Get the CompSpec and inc info of the original comprehension.\n inccomp = make_inccomp(tree, manager, comp, compname,\n force_uset=force_uset)\n spec = inccomp.spec\n augmented = inccomp.selfjoin == 'aug'\n \n # Make tags/filters/usets structures.\n ds = make_structures(spec.join.clauses, compname,\n singletag=manager.options.get_opt('single_tag'),\n subdem_tags=subdem_tags,\n reorder=reorder)\n if verbose:\n print(' Tags/filters/usets: ' + ' ' * 31 +\n ', '.join(s.name for s in ds.structs))\n \n # Eliminate Demand from clauses.\n new_clauses = []\n for cl in spec.join.clauses:\n if 
isinstance(cl, DemClause):\n new_clauses.append(cl.cl)\n else:\n new_clauses.append(cl)\n inccomp.spec = spec = spec._replace(join=spec.join._replace(\n clauses=new_clauses))\n \n # Incrementalize query comp.\n # (Since we've unwrapped demand clauses, the logic for defining\n # inner queries' U-sets for if filtering weren't used won't fire.)\n tree, maintcomps = inc_relcomp_helper(tree, manager, inccomp)\n \n # Rewrite maintcomps to use filters. Prune structures.\n tree, ds = filter_comps(tree, factory, ds, maintcomps,\n use_tag_checks,\n augmented=augmented, subdem_tags=subdem_tags)\n \n manager.stats['dem structs'] += len(ds.structs)\n \n # Incrementalize tags and filters.\n demcomps = structures_to_comps(ds, factory)\n for name, comp in demcomps:\n # When using augmented maintenance code, since the query\n # maintenance goes before an addition and after a removal,\n # make sure the demand invariant maintenance still comes\n # before and after that query maintenance respectively.\n outsideinvs = [compname] if augmented else []\n tree = inc_relcomp(tree, manager, comp, name,\n outsideinvs=outsideinvs)\n \n # Take care of usets.\n usets = sorted(ds.usets, key=attrgetter('i'))\n \n for uset in usets:\n \n at_rels = set(e.enumrel for i, e in enumerate(spec.join.clauses)\n if i < uset.i)\n \n # Maintain U-set change set.\n # FIXME: The delta set name should probably be freshly generated\n # for this comprehension, so as to ensure it does not interfere\n # with another delta set for a different use of the same demand\n # function (i.e. same nested query) in a different occurrence\n # (possibly even within this same outer query?).\n demname = uset.name\n deltaname = L.N.deltaset(demname)\n \n uset_comp = uset_to_comp(ds, uset, factory, spec.join.clauses[0])\n tree = inc_changetrack(tree, manager, uset_comp, deltaname)\n \n tree = OuterDemandMaintainer.run(\n tree, manager, deltaname,\n demname, at_rels,\n L.get_vartuple(uset_comp.resexp),\n None)\n \n return tree\n" }, { "alpha_fraction": 0.47031691670417786, "alphanum_fraction": 0.4783514440059662, "avg_line_length": 37.62643814086914, "blob_id": "caf4c8887bc1890feb6b7b9c68cd83f0a0e51ad1", "content_id": "bef32c8ea9580b480cd909640239c2bfed3ea4e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6721, "license_type": "no_license", "max_line_length": 146, "num_lines": 174, "path": "/incoq/tests/invinc/demand/test_tags.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Unit tests for tags.py.\"\"\"\n\n\nimport unittest\nfrom itertools import chain\n\nimport incoq.compiler.incast as L\nfrom incoq.compiler.comp import AsymptoticOrderer, Join, DeltaInfo\nfrom incoq.compiler.obj import ObjClauseFactory_Mixin\n\nfrom incoq.compiler.demand.demclause import DemClauseFactory_Mixin\nfrom incoq.compiler.demand.tags import *\nfrom incoq.compiler.demand.tags import Tag, Filter, USet\n\n\n#def format_structs(structs):\n# return [s.__dict__ for s in structs]\n\n\nclass CF(DemClauseFactory_Mixin, ObjClauseFactory_Mixin):\n pass\n\n\nclass TestTags(unittest.TestCase):\n \n def make_join(self, source, delta=None):\n join = Join.from_comp(L.pe(\n 'COMP({{... 
{}}}, [], {{}})'.format(source)),\n CF)\n join = join._replace(delta=delta)\n return join\n \n def setUp(self):\n self.sample_join = self.make_join(\n 'for (x, y) in R for (y, z) in R '\n 'for (z, w) in DEMQUERY(foo, [z], T) for a in S')\n \n self.sample_exp_structs = [\n Tag(i=0, name='Q_Tx', var='x',\n lhs=('x', 'y'), rel='R', reorder_i=0),\n Tag(i=0, name='Q_Ty1', var='y',\n lhs=('x', 'y'), rel='R', reorder_i=0),\n Filter(i=1, name='Q_dR2',\n lhs=('y', 'z'), rel='R',\n preds=('Q_Ty1',), reorder_i=1),\n Tag(i=1, name='Q_Ty2', var='y',\n lhs=('y', 'z'), rel='Q_dR2', reorder_i=1),\n Tag(i=1, name='Q_Tz', var='z',\n lhs=('y', 'z'), rel='Q_dR2', reorder_i=1),\n USet(i=2, name='foo', vars=('z',),\n preds=('Q_Tz',), pred_clauses=None, reorder_i=2),\n Tag(i=2, name='Q_Tw', var='w',\n lhs=('z', 'w'), rel='T', reorder_i=2),\n Tag(i=3, name='Q_Ta', var='a',\n lhs=('a',), rel='S', reorder_i=3),\n ]\n \n self.subdem_nontag_uset = USet(\n i=2, name='foo', vars=('z',),\n preds=None, pred_clauses=self.sample_join.clauses[:2],\n reorder_i=2)\n \n def test_make_structures(self):\n ds = make_structures(self.sample_join.clauses, 'Q',\n singletag=False, subdem_tags=True)\n structs = chain(ds.tags, ds.filters, ds.usets)\n self.assertCountEqual(structs, self.sample_exp_structs)\n \n ds = make_structures(self.sample_join.clauses, 'Q',\n singletag=False, subdem_tags=False)\n self.assertIn(self.subdem_nontag_uset, ds.usets)\n \n def test_prune_structures(self):\n ds = make_structures(self.sample_join.clauses, 'Q',\n singletag=False, subdem_tags=True)\n prune_structures(ds, [], subdem_tags=True)\n structs = chain(ds.tags, ds.filters, ds.usets)\n \n exp_names = ['Q_Ty1', 'Q_Tz', 'Q_dR2', 'foo']\n exp_structs = [s for s in self.sample_exp_structs\n if s.name in exp_names]\n self.assertCountEqual(structs, exp_structs)\n \n def test_used_filters(self):\n join = self.make_join(\n 'for (a, b) in U for (a, x) in _M '\n 'for (b, x) in _M for (x, y) in _M')\n \n ds = make_structures(join.clauses, 'Q',\n singletag=False, subdem_tags=True)\n \n join2 = self.make_join(\n 'for (a, b) in U for (a, x) in _M '\n 'for (b, x) in _M for (x, y) in {e}',\n DeltaInfo('_M', L.pe('e'), ('x', 'y'), 'add'))\n ordering = AsymptoticOrderer().get_order(\n enumerate(join2.clauses), [])\n \n used_indices = get_used_filters(ds, ordering, True)\n \n exp_used_indices = [1, 3]\n \n self.assertCountEqual(used_indices, exp_used_indices)\n \n def test_structures_to_comps(self):\n join = self.make_join(\n 'for (x, y) in R for (y, z) in R for a in S')\n \n ds = make_structures(join.clauses, 'Q',\n singletag=False, subdem_tags=True)\n prune_structures(ds, [1], subdem_tags=True)\n res = structures_to_comps(ds, CF)\n \n exp_res = [\n ('Q_Ty1', L.pe('COMP({y for (x, y) in R}, [], {})')),\n ('Q_dR2', L.pe('COMP({(y, z) for y in Q_Ty1 '\n 'for (y, z) in R}, [], {})')),\n ]\n \n self.assertEqual(res, exp_res)\n \n def test_uset_to_comp(self):\n ds = make_structures(self.sample_join.clauses, 'Q',\n singletag=False, subdem_tags=True)\n uset = ds.usets[0]\n comp = uset_to_comp(ds, uset, CF, self.sample_join.clauses[0])\n \n exp_comp = L.pe('COMP({z for z in Q_Tz}, [], {})')\n \n self.assertEqual(comp, exp_comp)\n \n ds = make_structures(self.sample_join.clauses, 'Q',\n singletag=False, subdem_tags=False)\n uset = ds.usets[0]\n comp = uset_to_comp(ds, uset, CF, self.sample_join.clauses[0])\n \n exp_comp = L.pe('COMP({z for (x, y) in R for (y, z) in R}, [], {})')\n \n self.assertEqual(comp, exp_comp)\n \n def test_filter_comps(self):\n join = self.make_join(\n 
'for (a, b) in R for (b, c) in S for (c, d) in _M')\n comp1 = L.pe(\n 'COMP({(a, b, c, d) for (a, b) in deltamatch(S, \"bb\", e, 1) for (b, c) in S for (c, d) in _M}, '\n '[], {})')\n comp2 = L.pe(\n 'COMP({(a, b, c, d) for (a, b) in R for (b, c) in S for (c, d) in deltamatch(_M, \"bb\", e, 1)}, '\n '[], {})')\n \n tree = L.p('''\n print(COMP1)\n print(COMP2)\n ''', subst={'COMP1': comp1, 'COMP2': comp2})\n \n ds = make_structures(join.clauses, 'Q',\n singletag=False, subdem_tags=True)\n tree, ds = filter_comps(tree, CF, ds,\n [comp1, comp2],\n True, augmented=False, subdem_tags=True)\n struct_names = [s.name for s in ds.tags + ds.filters + ds.usets]\n \n exp_tree = L.p('''\n print(COMP({(a, b, c, d) for (a, b) in deltamatch(S, 'bb', e, 1) for (b, c) in Q_dS for (c, d) in _M}, [], {}))\n print(COMP({(a, b, c, d) for (a, b) in R for (b, c) in Q_dS for (c, d) in deltamatch(Q_d_M, 'bb', e, 1) for (c, d) in Q_d_M}, [], {}))\n ''')\n exp_struct_names = ['Q_Tb1', 'Q_dS', 'Q_Tc', 'Q_d_M']\n \n self.assertEqual(tree, exp_tree)\n self.assertCountEqual(struct_names, exp_struct_names)\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.41208791732788086, "alphanum_fraction": 0.4780219793319702, "avg_line_length": 19.33333396911621, "blob_id": "8215feccf17ba827003cb2b50d2ee6893f3ce757", "content_id": "a20715059c966f9b92da8817f0ec93b9f1cfc89f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 182, "license_type": "no_license", "max_line_length": 47, "num_lines": 9, "path": "/incoq/tests/programs/auxmap/deadcode_out.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from incoq.runtime import *\nR = Set()\nS = Set()\nfor (x, y) in [(1, 2), (1, 3), (2, 3), (1, 4)]:\n R.add((x, y))\n S.add((x, y))\nR.remove((1, 4))\nS.remove((1, 4))\nprint(sorted(R))" }, { "alpha_fraction": 0.4694323241710663, "alphanum_fraction": 0.4890829622745514, "avg_line_length": 12.878787994384766, "blob_id": "814744f8c45aad23974be6a3768ecb0723fba735", "content_id": "84511953a587dfc19790d0d19889fc0af678e453", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 458, "license_type": "no_license", "max_line_length": 34, "num_lines": 33, "path": "/incoq/tests/programs/objcomp/map_in.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "# Map object-domain conversion.\n\nfrom incoq.runtime import *\n\nOPTIONS(\n obj_domain = True,\n)\n\nm = Map()\ns1 = Set()\ns2 = Set()\nm['a'] = s1\nm['b'] = s2\n\nfor i in range(10):\n o = Obj()\n o.i = i\n if i % 2:\n s1.add(o)\n else:\n s2.add(o)\n\nQUERYOPTIONS(\n '{o.i for o in m[k]}',\n params = ['m', 'k'],\n uset_mode = 'none',\n impl = 'inc',\n)\n\nk = 'a'\nprint(sorted({o.i for o in m[k]}))\nk = 'b'\nprint(sorted({o.i for o in m[k]}))\n" }, { "alpha_fraction": 0.5122169256210327, "alphanum_fraction": 0.5140047669410706, "avg_line_length": 37.1363639831543, "blob_id": "107dbab95ce3d31a9093ff7e3b1a58cee03ea406", "content_id": "c2fec72587952aa6fbd240c0fc64ab2f9341fdd7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3356, "license_type": "no_license", "max_line_length": 79, "num_lines": 88, "path": "/incoq/tests/util/test_type.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "###############################################################################\n# test_type.py #\n# Author: Jon Brandvein 
#\n###############################################################################\n\n\"\"\"Unit tests for the type module.\"\"\"\n\n\nimport unittest\n\nfrom incoq.util.type import *\n\n\nclass TestTypeChecking(unittest.TestCase, TypeCase):\n \n def test_checktype(self):\n checktype('a', str)\n checktype(True, int) # This is correct, bool subtypes int\n with self.assertRaisesRegex(\n TypeError, 'Expected int; got NoneType object'):\n checktype(None, int)\n \n def test_checktype_seq(self):\n checktype_seq([], str)\n checktype_seq([3, True], int)\n with self.assertRaisesRegex(\n TypeError, 'Expected sequence of bool; got sequence with '\n 'int object'):\n checktype_seq([3, True], bool)\n with self.assertRaisesRegex(\n TypeError, 'Expected sequence of bool; '\n 'got bool object instead of sequence'):\n checktype_seq(True, bool)\n with self.assertRaisesRegex(\n TypeError, 'Expected non-string sequence of str; '\n 'got string'):\n checktype_seq('abc', str)\n with self.assertRaisesRegex(\n TypeError, 'Expected sequence of int; '\n 'got generator object instead of sequence'):\n checktype_seq((i for i in range(3)), int)\n \n def test_checksubclass(self):\n checksubclass(str, object)\n checksubclass(str, str)\n with self.assertRaisesRegex(\n TypeError, 'Expected subclass of str; got int class'):\n checksubclass(int, str)\n \n def test_checksubclass_seq(self):\n checksubclass_seq([str, object], object)\n with self.assertRaisesRegex(\n TypeError, 'Expected sequence of subclasses of str; '\n 'got sequence with int class'):\n checksubclass_seq([str, int], str)\n with self.assertRaisesRegex(\n TypeError, 'Expected sequence of subclasses of str; '\n 'got sequence with int object'):\n checksubclass_seq([str, 3], str)\n \n with self.assertRaisesRegex(\n TypeError, 'Expected sequence of subclasses of str; '\n 'got bool class instead of sequence'):\n checksubclass_seq(bool, str)\n \n def test_TypeCase(self):\n # Here, assertTypeError is the object being tested,\n # not a tool for testing.\n \n with self.assertTypeError(int):\n checktype(object, int)\n \n with self.assertTypeError(int, sequence=True):\n checktype_seq(object, int)\n \n with self.assertTypeError(int, subclass=True):\n checksubclass(4, int)\n \n with self.assertTypeError(int, sequence=True, subclass=True):\n checksubclass_seq([int, bool, 4], int)\n \n with self.assertRaises(AssertionError):\n with self.assertTypeError(int):\n checktype(object, str)\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.44691357016563416, "alphanum_fraction": 0.5086419582366943, "avg_line_length": 19.25, "blob_id": "6aef7affa6e94d261d6ffb86402b51b635f5a227", "content_id": "d88ad8e8174624524ce1ceea33ae152177eca94f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 405, "license_type": "no_license", "max_line_length": 61, "num_lines": 20, "path": "/incoq/tests/programs/comp/uset/uset_explicit_in.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "# Incrementalized comprehensions with U-set.\n\nfrom incoq.runtime import *\n\nR = Set()\n\nfor v1, v2, v3 in {(1, 2, 3), (2, 2, 3), (1, 3, 4)}:\n R.add((v1, v2, v3))\n\na = 1\nb = 2\n\nQUERYOPTIONS(\n '{c for (a2, b2, c) in R if a == a2 if b == b2}',\n params = ['a', 'b'],\n uset_mode = 'explicit',\n uset_params = ['a'],\n impl = 'inc',\n)\nprint(sorted({c for (a2, b2, c) in R if a == a2 if b == b2}))\n" }, { "alpha_fraction": 0.5259493589401245, "alphanum_fraction": 0.5265823006629944, "avg_line_length": 22.939393997192383, 
"blob_id": "20cf0508bb51cd2bd49d62a60dd7f2dd5af7a74d", "content_id": "94a0bd7b7a768b2feedf71c07d61be950d4de1ef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1580, "license_type": "no_license", "max_line_length": 62, "num_lines": 66, "path": "/incoq/runtime/lru.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "# Simple LRU implementation using linked list and dict.\n# There are many recipes of this floating around; here's mine.\n\n__all__ = [\n 'LRUTracker',\n]\n\n\nclass Node:\n \n __slots__ = ['val', 'prev', 'next']\n \n def __init__(self, val, prev=None, next=None):\n self.val = val\n self.prev = prev\n self.next = next\n\nclass LRUTracker:\n \n \"\"\"A simple least-recently-used tracker. Operations\n are O(1) plus hashing. Elements must be hashable.\n \"\"\"\n \n def __init__(self):\n self.head = self.tail = None\n self.map = {}\n \n def _prepend(self, node):\n if self.head is None:\n self.head = self.tail = node\n else:\n self.head.prev = node\n self.head = node\n \n def _unlink(self, node):\n if node.next is not None:\n node.next.prev = node.prev\n if node.prev is not None:\n node.prev.next = node.next\n if self.head == node:\n self.head = node.next\n if self.tail == node:\n self.tail = node.prev\n \n def add(self, val):\n assert val not in self.map\n node = Node(val, None, self.head)\n self.map[val] = node\n self._prepend(node)\n \n def remove(self, val):\n node = self.map.pop(val)\n self._unlink(node)\n \n def ping(self, val):\n node = self.map[val]\n self._unlink(node)\n self._prepend(node)\n \n def peek(self):\n return self.tail.val\n \n def pop(self):\n val = self.tail.val\n self.remove(val)\n return val\n" }, { "alpha_fraction": 0.547650933265686, "alphanum_fraction": 0.5488611459732056, "avg_line_length": 32.33861541748047, "blob_id": "ad461073c622e68000e6cdb0c788a9c8e91fd8a5", "content_id": "2eb77361eb8b24da437832d140bf5a8772878147", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 23137, "license_type": "no_license", "max_line_length": 78, "num_lines": 694, "path": "/incoq/compiler/central/rewritings.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Miscellaneous rewriters aimed at preprocessing, postprocessing,\noptimization, and making other transformations applicable.\n\"\"\"\n\n\n__all__ = [\n 'import_distalgo',\n 'get_distalgo_message_sets',\n 'RelationFinder',\n 'MacroUpdateRewriter',\n 'SetTypeRewriter',\n 'ObjTypeRewriter',\n 'StrictUpdateRewriter',\n 'MapOpImporter',\n 'UpdateRewriter',\n 'MinMaxRewriter',\n 'eliminate_deadcode',\n 'PassEliminator',\n]\n\n\nfrom incoq.util.collections import OrderedSet\nimport incoq.compiler.incast as L\nfrom incoq.compiler.obj import is_specialrel\n\n\nclass DistalgoImporter(L.MacroProcessor):\n \n \"\"\"Preprocessing for distalgo inc interface programs.\n \n - len() is converted to count()\n - set(<generator expr>) is converted to a SetComp\n - set(<list or tuple>) is converted to a Set literal\n \"\"\"\n \n # Distalgo conversion is done even before importing basic IncAST\n # operations from their Python representation. 
For consistency,\n # we'll use parse_structast() to avoid creating IncAST-specific\n # nodes at this stage.\n \n def handle_fe_len(self, f, arg):\n return L.parse_structast('count(ARG)',\n subst={'ARG': arg}, mode='expr')\n \n def handle_fe_set(self, f, *args):\n if len(args) == 0:\n return None\n elif len(args) == 1:\n arg = args[0]\n if isinstance(arg, L.GeneratorExp):\n return L.SetComp(arg.elt, arg.generators)\n elif isinstance(arg, (L.List, L.Tuple)):\n return L.Set(arg.elts)\n else:\n return None\n else:\n raise L.ProgramError('set() takes at most one arg')\n\ndef import_distalgo(tree):\n tree = DistalgoImporter.run(tree)\n return tree\n\n\ndef get_distalgo_message_sets(tree):\n \"\"\"Return all variable names that match the pattern for distalgo\n message sets.\n \"\"\"\n vars = L.VarsFinder.run(tree)\n return [v for v in vars\n if v.startswith('_') and\n ('ReceivedEvent_' in v or\n 'SentEvent_' in v)]\n\n\nclass RelationFinder(L.NodeVisitor):\n \n \"\"\"Find variables that we can statically infer to be relations,\n i.e. sets that are unaliased and top-level.\n \n For R to be inferred to be a relation, it must have a global-scope\n initialization having one of the following forms:\n \n R = Set()\n R = incoq.runtime.Set()\n R = set()\n \n and its only other occurrences must have the forms:\n \n - a SetUpdate naming R as the target\n \n - the RHS of membership clauses (including condition clauses)\n \n - the RHS of a For loop\n \"\"\"\n \n def process(self, tree):\n self.inited = OrderedSet()\n self.disqual = OrderedSet()\n super().process(tree)\n return self.inited - self.disqual\n \n # Manage a toplevel flag to record whether we're at global scope.\n \n def visit_Module(self, node):\n self.toplevel = True\n self.generic_visit(node)\n \n def nontoplevel_helper(self, node):\n last = self.toplevel\n self.toplevel = False\n self.generic_visit(node)\n self.toplevel = last\n \n visit_FunctionDef = nontoplevel_helper\n visit_ClassDef = nontoplevel_helper\n \n def visit_Assign(self, node):\n allowed_inits = [\n L.pe('Set()'),\n L.pe('incoq.runtime.Set()'),\n L.pe('set()'),\n ]\n # If this is a relation initializer, mark the relation name\n # and don't recurse.\n if (self.toplevel and\n L.is_varassign(node)):\n name, value = L.get_varassign(node)\n if value in allowed_inits:\n self.inited.add(name)\n return\n \n self.generic_visit(node)\n \n def visit_SetUpdate(self, node):\n # Skip the target if it's just a name.\n if isinstance(node.target, L.Name):\n self.visit(node.elem)\n else:\n self.generic_visit(node)\n \n def visit_For(self, node):\n # Skip the iter if it's just a name.\n if isinstance(node.iter, L.Name):\n self.visit(node.target)\n self.visit(node.body)\n self.visit(node.orelse)\n else:\n self.generic_visit(node)\n \n def visit_Comp(self, node):\n # Skip the iter of each clause if it's just a name.\n # Also recognize condition clauses that express memberships.\n # Always skip the params and options.\n self.visit(node.resexp)\n for cl in node.clauses:\n if (isinstance(cl, L.Enumerator) and\n isinstance(cl.iter, L.Name)):\n self.visit(cl.target)\n elif (isinstance(cl, L.Compare) and\n len(cl.ops) == len(cl.comparators) == 1 and\n isinstance(cl.ops[0], L.In) and\n isinstance(cl.comparators[0], L.Name)):\n self.visit(cl.left)\n else:\n self.visit(cl)\n \n def visit_Name(self, node):\n # We got here through some disallowed use of R.\n self.disqual.add(node.id)\n\n\nclass LegalUpdateValidator(L.NodeVisitor):\n \n \"\"\"Return True if an update operand expression is ok,\n or False if it 
needs rewriting.\n \"\"\"\n \n class Invalid(BaseException):\n pass\n \n def process(self, tree):\n try:\n super().process(tree)\n except self.Invalid:\n return False\n return True\n \n # Any non-whitelisted node type causes failure.\n \n whitelist = [\n 'Num', 'Str', 'Bytes', 'Name',\n 'Tuple', 'List', 'Dict', 'Set',\n 'Load',\n 'BoolOp', 'BinOp', 'UnaryOp',\n 'And', 'Or',\n # Exclude bitwise operators, which can construct new sets.\n 'Add', 'Sub', 'Mult', 'Div', 'Mod', 'Pow', 'LShift',\n 'RShift', 'FloorDiv',\n # Exclude Not, which can be used for cardinality tests on sets.\n 'Invert', 'UAdd', 'USub',\n # Exclude membership operators In, NotIn.\n 'Eq', 'NotEq', 'Lt', 'LtE', 'Gt', 'GtE', 'Is', 'IsNot',\n ]\n whitelist = [getattr(L, name) for name in whitelist]\n \n def generic_visit(self, node):\n if not isinstance(node, tuple(self.whitelist)):\n raise self.Invalid\n super().generic_visit(node)\n\nclass MacroUpdateRewriter(L.NodeTransformer):\n \n \"\"\"Rewrite MacroUpdates into normal set and map updates.\"\"\"\n \n # TODO: These could be refactored as macros in incast perhaps?\n \n def visit_MacroUpdate(self, node):\n op = node.op\n subst = {'TARGET': node.target,\n 'OTHER': node.other}\n # Remember that it's illegal to modify a set while iterating over it\n # in a For loop without making a copy. \n if op == 'union':\n # No copy needed because if node.target and node.other are\n # aliased, the operation has no effect.\n code = L.pc('''\n for _upelem in OTHER:\n TARGET.nsadd(_upelem)\n ''', subst=subst)\n elif op == 'inter':\n code = L.pc('''\n for _upelem in list(TARGET):\n if _upelem not in OTHER:\n TARGET.remove(_upelem)\n ''', subst=subst)\n elif op == 'diff':\n code = L.pc('''\n for _upelem in list(OTHER):\n TARGET.nsremove(_upelem)\n ''', subst=subst)\n elif op == 'symdiff':\n code = L.pc('''\n for _upelem in list(OTHER):\n if _upelem in TARGET:\n TARGET.remove(_upelem)\n else:\n TARGET.add(_upelem)\n ''', subst=subst)\n elif op == 'assign':\n code = L.pc('''\n if TARGET is not OTHER:\n while len(TARGET) > 0:\n _upelem = next(iter(TARGET))\n TARGET.remove(_upelem)\n for _upelem in OTHER:\n TARGET.add(_upelem)\n ''', subst=subst)\n elif op == 'clear':\n code = L.pc('''\n while len(TARGET) > 0:\n _upelem = next(iter(TARGET))\n TARGET.remove(_upelem)\n ''', subst=subst)\n elif op == 'mapassign':\n code = L.pc('''\n if TARGET is not OTHER:\n while len(TARGET) > 0:\n _upkey = next(iter(TARGET))\n TARGET.delkey(_upkey)\n for _upkey, _upval in OTHER.items():\n TARGET.assignkey(_upkey, _upval)\n ''', subst=subst)\n elif op == 'mapclear':\n code = L.pc('''\n while len(TARGET) > 0:\n _upkey = next(iter(TARGET))\n TARGET.delkey(_upkey)\n ''', subst=subst)\n else:\n assert()\n return code\n\nclass SetTypeRewriter(L.StmtTransformer):\n \n \"\"\"Rewrite set expressions to use incoq.runtime.Set.\n \n If set_literals is True, handle set literal expressions, including\n ones that use set(...).\n \n If orig_set_comps is True, handle set comprehensions marked\n with the in_original option.\n \"\"\"\n \n def __init__(self, namegen, *, set_literals, orig_set_comps):\n super().__init__()\n self.namegen = namegen\n self.set_literals = set_literals\n self.orig_set_comps = orig_set_comps\n \n def helper(self, node, no_update=False):\n fresh = next(self.namegen)\n \n if no_update:\n template = L.trim('''\n S_VAR = Set()\n ''')\n else:\n template = L.trim('''\n S_VAR = Set()\n L_VAR.update(EXPR)\n ''')\n new_code = L.pc(template, subst={'L_VAR': L.ln(fresh),\n 'S_VAR': L.sn(fresh),\n 'EXPR': node})\n \n 
self.pre_stmts.extend(new_code)\n return L.ln(fresh)\n \n def visit_Comp(self, node):\n node = self.generic_visit(node)\n \n if (self.orig_set_comps and\n node.options.get('in_original', False)):\n return self.helper(node)\n else:\n return node\n \n def visit_Set(self, node):\n node = self.generic_visit(node)\n \n if self.set_literals:\n return self.helper(node)\n else:\n return node\n \n def visit_Call(self, node):\n # Handle set(...) syntax as if it were {...}.\n if (self.set_literals and\n isinstance(node.func, L.Name) and\n node.func.id == 'set'):\n no_update = len(node.args) == 0\n return self.helper(node, no_update=no_update)\n else:\n return node\n \n def visit_For(self, node):\n # Skip the top level of node.iter, because set iteration\n # looks at the set contents, not the constructed set value.\n #\n # This is accomplished by using generic_visit() instead of\n # visit() on the iter, to avoid dispatch to any of the\n # above handlers.\n iter_result = self.generic_visit(node.iter)\n # Handle special case return values. Tuple return values\n # are not permitted in this context, so it's just the None\n # case.\n if iter_result is None:\n iter_result = node.iter\n \n target = self.visit(node.target)\n body = self.visit(node.body)\n orelse = self.visit(node.orelse)\n \n new_node = L.For(target, iter_result, body, orelse)\n # If there's no change, avoid returning a newly constructed\n # node, which would force copying up the tree.\n if new_node == node:\n new_node = node\n \n return new_node\n\nclass ObjTypeRewriter(L.NodeTransformer):\n \n \"\"\"Add incoq.runtime.Obj as a base class to all class definitions.\"\"\"\n \n def valid_baseclass(self, expr):\n if isinstance(expr, L.Name):\n return True\n elif (isinstance(expr, L.Attribute) and\n self.valid_baseclass(expr.value)):\n return True\n else:\n return False\n \n def visit_ClassDef(self, node):\n node = self.generic_visit(node)\n \n assert all(self.valid_baseclass(b) for b in node.bases), \\\n 'Illegal base class'\n objbase = L.ln('Set')\n if objbase not in node.bases:\n new_bases = node.bases + (objbase,)\n node = node._replace(bases=new_bases)\n \n return node\n\n\nclass StrictUpdateRewriter(L.NodeTransformer):\n \n \"\"\"Rewrite set, field, and/or map updates with if-guards\n to ensure that they can be considered strict. 
To be run\n after macro updates have already been turned into elementary\n updates.\n \"\"\"\n \n def __init__(self, *, rewrite_sets=True, rewrite_fields=True,\n rewrite_maps=True):\n super().__init__()\n self.rewrite_sets = rewrite_sets\n self.rewrite_fields = rewrite_fields\n self.rewrite_maps = rewrite_maps\n \n # No need to generic_visit() since updates can't contain\n # other updates.\n \n def visit_SetUpdate(self, node):\n if not self.rewrite_sets:\n return node\n nsop = {'add': 'nsadd',\n 'remove': 'nsremove'}[node.op]\n template = 'TARGET.{}(ELEM)'.format(nsop)\n return L.pc(template, subst={'TARGET': node.target,\n 'ELEM': node.elem})\n \n def visit_Assign(self, node):\n if not self.rewrite_fields:\n return node\n if not L.is_attrassign(node):\n return node\n cont, field, value = L.get_attrassign(node)\n return L.pc('''\n CONT.nsassignfield(FIELD, VALUE)\n ''', subst={'CONT': cont,\n 'FIELD': L.ln(field),\n 'VALUE': value})\n \n def visit_Delete(self, node):\n if not self.rewrite_fields:\n return node\n if not L.is_delattr(node):\n return node\n cont, field = L.get_delattr(node)\n return L.pc('''\n CONT.nsdelfield(FIELD)\n ''', subst={'CONT': cont,\n 'FIELD': L.ln(field)})\n \n def visit_AssignKey(self, node):\n if not self.rewrite_maps:\n return node\n return L.pc('''\n TARGET.nsassignkey(KEY, VALUE)\n ''', subst={'TARGET': node.target,\n 'KEY': node.key,\n 'VALUE': node.value})\n \n def visit_DelKey(self, node):\n if not self.rewrite_maps:\n return node\n return L.pc('''\n TARGET.nsdelkey(KEY)\n ''', subst={'TARGET': node.target,\n 'KEY': node.key})\n\n\nclass MapOpImporter(L.NodeTransformer):\n \n \"\"\"Convert assignment and deletion of map keys to AssignKey\n and DelKey nodes. Uses of the map \"globals()\" are ignored.\n \"\"\"\n \n def visit_Assign(self, node):\n if L.is_mapassign(node):\n target, key, value = L.get_mapassign(node)\n return L.AssignKey(target, key, value)\n return node\n \n def visit_Delete(self, node):\n if L.is_delmap(node):\n target, key = L.get_delmap(node)\n return L.DelKey(target, key)\n return node\n\nclass UpdateRewriter(L.NodeTransformer):\n \n \"\"\"Rewrite set and map updates to ensure that the operands\n are legal update expressions.\n \"\"\"\n \n def __init__(self, namegen):\n self.namegen = namegen\n \n # No need to recurse since we only deal with update statements,\n # which can't be nested.\n \n def visit_SetUpdate(self, node):\n target_ok = LegalUpdateValidator.run(node.target)\n elem_ok = LegalUpdateValidator.run(node.elem)\n if target_ok and elem_ok:\n return node\n \n code = ()\n if not target_ok:\n targetvar = next(self.namegen)\n code += (L.Assign((L.sn(targetvar),), node.target),)\n node = node._replace(target=L.ln(targetvar))\n if not elem_ok:\n elemvar = next(self.namegen)\n code += (L.Assign((L.sn(elemvar),), node.elem),)\n node = node._replace(elem=L.ln(elemvar))\n return code + (node,)\n \n def visit_AssignKey(self, node):\n target_ok = LegalUpdateValidator.run(node.target)\n key_ok = LegalUpdateValidator.run(node.key)\n value_ok = LegalUpdateValidator.run(node.value)\n if target_ok and key_ok and value_ok:\n return node\n \n code = ()\n if not target_ok:\n targetvar = next(self.namegen)\n code += (L.Assign((L.sn(targetvar),), node.target),)\n node = node._replace(target=L.ln(targetvar))\n if not key_ok:\n keyvar = next(self.namegen)\n code += (L.Assign((L.sn(keyvar),), node.key),)\n node = node._replace(key=L.ln(keyvar))\n if not value_ok:\n valuevar = next(self.namegen)\n code += (L.Assign((L.sn(valuevar),), node.value),)\n node 
= node._replace(value=L.ln(valuevar))\n \n return code + (node,)\n \n def visit_DelKey(self, node):\n target_ok = LegalUpdateValidator.run(node.target)\n key_ok = LegalUpdateValidator.run(node.key)\n if target_ok and key_ok:\n return node\n \n code = ()\n if not target_ok:\n targetvar = next(self.namegen)\n code += (L.Assign((L.sn(targetvar),), node.target),)\n node = node._replace(target=L.ln(targetvar))\n if not key_ok:\n keyvar = next(self.namegen)\n code += (L.Assign((L.sn(keyvar),), node.key),)\n node = node._replace(key=L.ln(keyvar))\n \n return code + (node,)\n\n\nclass MinMaxRewriter(L.NodeTransformer):\n \n \"\"\"If a min/max operation is over a union of set literals or\n set comprehensions, distribute the min/max to each operand\n and take the overall min/max. The overall aggregate uses the\n runtime's min2() and max2() functions, which are not\n incrementalized but allow their arguments to be None.\n \"\"\"\n \n def visit_Aggregate(self, node):\n node = self.generic_visit(node)\n \n if not node.op in ['min', 'max']:\n return node\n func2 = {'min': 'min2', 'max': 'max2'}[node.op]\n \n if not L.is_setunion(node.value):\n return node\n sets = L.get_setunion(node.value)\n if len(sets) == 1:\n # If there's just one set, don't change anything.\n return node\n \n # Wrap each operand in an aggregate query with the same\n # options as the original aggregate. (This ensures that\n # 'impl' is carried over.) Set literals are wrapped in\n # a call to incoq.runtime's min2()/max2() instead of an\n # Aggregate query node.\n terms = []\n for s in sets:\n if isinstance(s, (L.Comp, L.Name)):\n new_term = L.Aggregate(s, node.op, node.options)\n else:\n new_term = L.pe('OP(__ARGS)', subst={'OP': L.ln(func2)})\n new_term = new_term._replace(args=s.elts)\n terms.append(new_term)\n \n # The new top-level aggregate is min2()/max2().\n new_node = L.pe('OP(__ARGS)',\n subst={'OP': L.ln(func2)})\n new_node = new_node._replace(args=tuple(terms))\n return new_node\n\n\n# TODO: There are two cases where dead code elimination will fail to\n# get rid of a relation. One is when the relation is reference-counted,\n# because the reference-counted add/remove operations are already\n# broken down into operations that inspect the current refcount to\n# decide what to do. We'd have to change it so rcadd and rcremove\n# operations are not expanded until the end. This would also entail\n# changing auxmap transformation to work for rcadd/rcremove.\n#\n# The second case is when the contents of the set are read directly,\n# such as in filter checks. 
This could be fixed by rewriting these tests\n# to use an arbitrary map over the set instead.\n\nclass DeadCodeEliminator(L.NodeTransformer):\n \n def __init__(self, deadvars):\n self.deadvars = set(deadvars)\n \n def visit_Assign(self, node):\n if (len(node.targets) == 1 and\n isinstance(node.targets[0], L.Name) and\n node.targets[0].id in self.deadvars):\n return L.Pass()\n \n def update_helper(self, node):\n if isinstance(node.target, L.Name):\n if node.target.id in self.deadvars:\n return L.Pass()\n \n visit_SetUpdate = update_helper\n visit_RCSetRefUpdate = update_helper\n visit_AssignKey = update_helper\n visit_DelKey = update_helper\n\n\nclass PassEliminator(L.NodeTransformer):\n \n def filter_pass(self, stmts):\n \"\"\"Update a list of statements to exclude Pass nodes.\"\"\"\n if len(stmts) == 1:\n # Can't remove a lone Pass.\n return stmts\n else:\n return tuple(s for s in stmts if not isinstance(s, L.Pass))\n \n def body_helper(self, node):\n node = self.generic_visit(node)\n \n new_body = self.filter_pass(node.body)\n node = node._replace(body=new_body)\n if hasattr(node, 'orelse'):\n new_orelse = self.filter_pass(node.orelse)\n node = node._replace(orelse=new_orelse)\n \n return node\n \n visit_Module = body_helper\n visit_FunctionDef = body_helper\n visit_ClassDef = body_helper\n visit_For = body_helper\n visit_While = body_helper\n visit_If = body_helper\n visit_With = body_helper\n\n\ndef eliminate_deadcode(tree, *, keepvars=None, obj_domain_out, verbose=False):\n \"\"\"Modify the program to remove sets that are not read from.\"\"\"\n if keepvars is None:\n keepvars = set()\n keepvars = set(keepvars)\n \n # Find variables that are only written to, not read from.\n # Exclude special names and keepvars.\n special_vars = set(['__all__'])\n all_vars = L.VarsFinder.run(tree)\n read_vars = L.VarsFinder.run(\n tree, ignore_store=True)\n write_only_vars = all_vars - read_vars - special_vars - keepvars\n \n if obj_domain_out:\n # Also exclude pairsets since they will be translated into\n # actual obj-domain updates.\n for v in set(write_only_vars):\n if is_specialrel(v):\n write_only_vars.remove(v)\n \n # Delete most updates to these variables. 
Some cases, such as\n # the target of a For loop, are left alone.\n tree = DeadCodeEliminator.run(tree, write_only_vars)\n \n if verbose:\n if len(write_only_vars) > 0:\n print('Eliminated dead variables: ' + ', '.join(write_only_vars))\n else:\n print('No dead vars eliminated')\n \n return tree\n" }, { "alpha_fraction": 0.6200317740440369, "alphanum_fraction": 0.6200317740440369, "avg_line_length": 18.060606002807617, "blob_id": "08212cd57a4a5578e07fa57aa2e690e6d48d80b8", "content_id": "35471ebc74580b07b0c449f02b836f9053205229", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 629, "license_type": "no_license", "max_line_length": 131, "num_lines": 33, "path": "/experiments/django/django_osq.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from incoq.runtime import *\nfrom osq import query\n\nusers = Set()\n\ndef make_user(id):\n user = Obj()\n user.id = id\n user.groups = Set()\n users.add(user)\n return user\n\ndef make_group(active):\n group = Obj()\n group.active = active\n group.perms = Set()\n return group\n\ndef make_perm(name):\n perm = Obj()\n perm.name = name\n return perm\n\ndef add_group(u, g):\n u.groups.add(g)\n\ndef add_perm(g, p):\n g.perms.add(p)\n\ndef do_query(uid):\n return query('users, uid -> {p.name for u in users for g in u.groups for p in g.perms if u.id is uid if g.active}', users, uid)\n\ndo_query_nodemand = do_query\n" }, { "alpha_fraction": 0.5042287707328796, "alphanum_fraction": 0.5122835040092468, "avg_line_length": 36.33834457397461, "blob_id": "3cbc9f04a72c8b9be8610dfa07a3044813556c3e", "content_id": "25d0605897dfc4d9fdab58d3a07a22a1a6d77f80", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4966, "license_type": "no_license", "max_line_length": 78, "num_lines": 133, "path": "/incoq/tests/invinc/cost/test_interact.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Unit tests for interact.py.\"\"\"\n\n\nimport unittest\nfrom types import SimpleNamespace\n\nfrom incoq.util.unify import unify\nimport incoq.compiler.incast as L\nfrom incoq.compiler.set import Mask\nfrom incoq.compiler.comp import CompSpec\nfrom incoq.compiler.central import CentralCase\nfrom incoq.compiler.cost.cost import *\nfrom incoq.compiler.cost.interact import *\nfrom incoq.compiler.cost.interact import (make_dompath_eqs, split_resexp_vars,\n get_nondet_info, CostReinterpreter)\n\n\nclass DomainCase(CentralCase):\n \n def setUp(self):\n super().setUp()\n \n self.initsubst = {'R': ('<T>', ('<T>', 'x1', ('foo',)), 'y'),\n 'x': ('<T>', 'x1', ('foo',))}\n self.subst = add_domain_names(self.initsubst, {})\n self.domain_sizes = {'foo': NameCost('C')}\n self.cost_rules = {'R.1.1': NameCost('A'),\n 'R.2': NameCost('B')}\n self.trans = CostReinterpreter({}, self.subst, self.domain_sizes,\n self.cost_rules)\n \n def test_add_dompath_eqs(self):\n eqs = make_dompath_eqs(self.initsubst, ['R'])\n exp_eqs = [\n ('R', ('<T>', 'R.1', 'R.2')),\n ('R.1', ('<T>', 'R.1.1', 'R.1.2')),\n ('R.1.1', 'x1'),\n ('R.1.2', ('foo',)),\n ('R.2', 'y'),\n ]\n self.assertCountEqual(eqs, exp_eqs)\n \n def test_dompath_to_size(self):\n cost = self.trans.dompath_to_size('R')\n exp_cost_str = '((A*C)*B)'\n self.assertEqual(str(cost), exp_cost_str)\n \n def test_dompaths_for_mask(self):\n dompaths = self.trans.dompaths_for_mask('R', Mask.IN)\n exp_dompaths = ('R.1',)\n self.assertEqual(dompaths, exp_dompaths)\n \n dompaths = self.trans.dompaths_for_mask('R', Mask.UU)\n 
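# With mask 'uu' (both components unbound), each top-level component\n        # of R's domain is a separate dompath.\n        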
exp_dompaths = ('R.1', 'R.2')\n self.assertEqual(dompaths, exp_dompaths)\n \n dompaths = self.trans.dompaths_for_mask('R.2', Mask.U)\n exp_dompaths = ('R.2',)\n self.assertEqual(dompaths, exp_dompaths)\n \n dompaths = self.trans.dompaths_for_mask('R.2', Mask('b'))\n exp_dompaths = ()\n self.assertEqual(dompaths, exp_dompaths)\n \n dompaths = self.trans.dompaths_for_mask('S', Mask.IN)\n exp_dompaths = None\n self.assertEqual(dompaths, exp_dompaths)\n \n def test_reinterpreter_basic(self):\n # R + R_in\n cost = SumCost((NameCost('R'), IndefImgsetCost('R', Mask.IN)))\n cost = CostReinterpreter.run(cost, {}, self.subst, self.domain_sizes,\n self.cost_rules)\n exp_cost_str = '(((A*C)*B) + (A*C))'\n self.assertEqual(str(cost), exp_cost_str)\n\nclass CompCase(CentralCase):\n \n def test_resexp_vars(self):\n resexp = L.pe('(a + b, (c, d), (a, c, e, f))')\n bounds, unbounds = split_resexp_vars(resexp, Mask('bbu'))\n exp_bounds = {'c', 'd'}\n exp_unbounds = {'a', 'e', 'f'}\n self.assertEqual(bounds, exp_bounds)\n self.assertEqual(unbounds, exp_unbounds)\n \n def test_nondet_vars(self):\n comp = L.pe('COMP({(a, c, d) for (a, b) in R '\n 'for (b, c) in _F_ '\n 'for (c, b) in S '\n 'for (c, d) in T '\n 'for (d, e) in U}, [], {})')\n spec = CompSpec.from_comp(comp, self.manager.factory)\n result = get_nondet_info(spec, ['a'])\n exp_result = [\n ('R', Mask.OUT, {'b'}),\n ('_F_', Mask.BW, set()),\n ('S', Mask.BB, set()),\n ('T', Mask.OUT, {'d'}),\n ]\n self.assertEqual(result, exp_result)\n \n def test_reinterpreter_comp(self):\n comp1 = L.pe('COMP({(x, y, (x, z)) for (x, y) in S '\n 'for (y, z) in T}, [], {})')\n comp2 = L.pe('COMP({(x, x) for (x, y) in U}, [], {})')\n spec1 = CompSpec.from_comp(comp1, self.manager.factory)\n spec2 = CompSpec.from_comp(comp2, self.manager.factory)\n \n # Dummy wrapper for what would be IncComp.\n Dummy1 = SimpleNamespace()\n Dummy1.spec = spec1\n Dummy2 = SimpleNamespace()\n Dummy2.spec = spec2\n invs = {'Q': Dummy1, 'S': Dummy2}\n # Boilerplate domain information regarding the comprehension.\n constrs = []\n constrs.extend(spec1.get_domain_constraints('Q'))\n constrs.extend(spec2.get_domain_constraints('S'))\n domain_subst = unify(constrs)\n domain_subst = add_domain_names(domain_subst, {})\n \n trans = CostReinterpreter(invs, domain_subst, {}, {})\n \n cost = NameCost('Q')\n cost = trans.process(cost)\n cost = normalize(cost)\n exp_cost_str = '(Q_x*Q_z)'\n self.assertEqual(str(cost), exp_cost_str)\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.4284326732158661, "alphanum_fraction": 0.5184988975524902, "avg_line_length": 45.80165100097656, "blob_id": "4918191397bb796b15b70d6c5506315ed336f52a", "content_id": "c93ea7622169f37ec379dfe6c8ae2aff9adf1ff4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11325, "license_type": "no_license", "max_line_length": 186, "num_lines": 242, "path": "/incoq/tests/programs/deminc/tup/obj_out.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from incoq.runtime import *\n# Comp1 := {(s, a) : s in _U_Comp1, (s, _tup1) in _M, (_tup1, a, _) in _TUP2, (a > 1)}\n# Comp1_Ts := {s : s in _U_Comp1}\n# Comp1_d_M := {(s, _tup1) : s in Comp1_Ts, (s, _tup1) in _M}\n# Comp1_T_tup1 := {_tup1 : (s, _tup1) in Comp1_d_M}\n# Comp1_d_TUP2 := {(_tup1, a, _v1) : _tup1 in Comp1_T_tup1, (_tup1, a, _v1) in _TUP2}\n_m_Comp1_out = Map()\ndef _maint__m_Comp1_out_add(_e):\n (v23_1, v23_2) = _e\n if (v23_1 not in _m_Comp1_out):\n _m_Comp1_out[v23_1] = set()\n 
_m_Comp1_out[v23_1].add(v23_2)\n\ndef _maint__m_Comp1_out_remove(_e):\n (v24_1, v24_2) = _e\n _m_Comp1_out[v24_1].remove(v24_2)\n if (len(_m_Comp1_out[v24_1]) == 0):\n del _m_Comp1_out[v24_1]\n\n_m_Comp1_d_M_in = Map()\ndef _maint__m_Comp1_d_M_in_add(_e):\n (v21_1, v21_2) = _e\n if (v21_2 not in _m_Comp1_d_M_in):\n _m_Comp1_d_M_in[v21_2] = set()\n _m_Comp1_d_M_in[v21_2].add(v21_1)\n\ndef _maint__m_Comp1_d_M_in_remove(_e):\n (v22_1, v22_2) = _e\n _m_Comp1_d_M_in[v22_2].remove(v22_1)\n if (len(_m_Comp1_d_M_in[v22_2]) == 0):\n del _m_Comp1_d_M_in[v22_2]\n\n_m_Comp1_d_TUP2_bbw = Map()\ndef _maint__m_Comp1_d_TUP2_bbw_add(_e):\n (v19_1, v19_2, v19_3) = _e\n if ((v19_1, v19_2) not in _m_Comp1_d_TUP2_bbw):\n _m_Comp1_d_TUP2_bbw[(v19_1, v19_2)] = RCSet()\n if (() not in _m_Comp1_d_TUP2_bbw[(v19_1, v19_2)]):\n _m_Comp1_d_TUP2_bbw[(v19_1, v19_2)].add(())\n else:\n _m_Comp1_d_TUP2_bbw[(v19_1, v19_2)].incref(())\n\ndef _maint__m_Comp1_d_TUP2_bbw_remove(_e):\n (v20_1, v20_2, v20_3) = _e\n if (_m_Comp1_d_TUP2_bbw[(v20_1, v20_2)].getref(()) == 1):\n _m_Comp1_d_TUP2_bbw[(v20_1, v20_2)].remove(())\n else:\n _m_Comp1_d_TUP2_bbw[(v20_1, v20_2)].decref(())\n if (len(_m_Comp1_d_TUP2_bbw[(v20_1, v20_2)]) == 0):\n del _m_Comp1_d_TUP2_bbw[(v20_1, v20_2)]\n\ndef _maint_Comp1_d_TUP2_Comp1_T_tup1_add(_e):\n # Iterate {(v15__tup1, v15_a, v15__v1) : v15__tup1 in deltamatch(Comp1_T_tup1, 'b', _e, 1), (v15__tup1, v15_a, v15__v1) in _TUP2}\n v15__tup1 = _e\n if (isinstance(v15__tup1, tuple) and (len(v15__tup1) == 2)):\n for (v15_a, v15__v1) in setmatch({(v15__tup1, v15__tup1[0], v15__tup1[1])}, 'buu', v15__tup1):\n # Begin maint _m_Comp1_d_TUP2_bbw after \"Comp1_d_TUP2.add((v15__tup1, v15_a, v15__v1))\"\n _maint__m_Comp1_d_TUP2_bbw_add((v15__tup1, v15_a, v15__v1))\n # End maint _m_Comp1_d_TUP2_bbw after \"Comp1_d_TUP2.add((v15__tup1, v15_a, v15__v1))\"\n\ndef _maint_Comp1_d_TUP2_Comp1_T_tup1_remove(_e):\n # Iterate {(v16__tup1, v16_a, v16__v1) : v16__tup1 in deltamatch(Comp1_T_tup1, 'b', _e, 1), (v16__tup1, v16_a, v16__v1) in _TUP2}\n v16__tup1 = _e\n if (isinstance(v16__tup1, tuple) and (len(v16__tup1) == 2)):\n for (v16_a, v16__v1) in setmatch({(v16__tup1, v16__tup1[0], v16__tup1[1])}, 'buu', v16__tup1):\n # Begin maint _m_Comp1_d_TUP2_bbw before \"Comp1_d_TUP2.remove((v16__tup1, v16_a, v16__v1))\"\n _maint__m_Comp1_d_TUP2_bbw_remove((v16__tup1, v16_a, v16__v1))\n # End maint _m_Comp1_d_TUP2_bbw before \"Comp1_d_TUP2.remove((v16__tup1, v16_a, v16__v1))\"\n\nComp1_T_tup1 = RCSet()\ndef _maint_Comp1_T_tup1_Comp1_d_M_add(_e):\n # Iterate {(v13_s, v13__tup1) : (v13_s, v13__tup1) in deltamatch(Comp1_d_M, 'bb', _e, 1)}\n (v13_s, v13__tup1) = _e\n if (v13__tup1 not in Comp1_T_tup1):\n Comp1_T_tup1.add(v13__tup1)\n # Begin maint Comp1_d_TUP2 after \"Comp1_T_tup1.add(v13__tup1)\"\n _maint_Comp1_d_TUP2_Comp1_T_tup1_add(v13__tup1)\n # End maint Comp1_d_TUP2 after \"Comp1_T_tup1.add(v13__tup1)\"\n else:\n Comp1_T_tup1.incref(v13__tup1)\n\ndef _maint_Comp1_T_tup1_Comp1_d_M_remove(_e):\n # Iterate {(v14_s, v14__tup1) : (v14_s, v14__tup1) in deltamatch(Comp1_d_M, 'bb', _e, 1)}\n (v14_s, v14__tup1) = _e\n if (Comp1_T_tup1.getref(v14__tup1) == 1):\n # Begin maint Comp1_d_TUP2 before \"Comp1_T_tup1.remove(v14__tup1)\"\n _maint_Comp1_d_TUP2_Comp1_T_tup1_remove(v14__tup1)\n # End maint Comp1_d_TUP2 before \"Comp1_T_tup1.remove(v14__tup1)\"\n Comp1_T_tup1.remove(v14__tup1)\n else:\n Comp1_T_tup1.decref(v14__tup1)\n\nComp1_d_M = RCSet()\ndef _maint_Comp1_d_M_Comp1_Ts_add(_e):\n # Iterate {(v9_s, v9__tup1) : v9_s in deltamatch(Comp1_Ts, 'b', 
_e, 1), (v9_s, v9__tup1) in _M}\n v9_s = _e\n if isinstance(v9_s, Set):\n for v9__tup1 in v9_s:\n Comp1_d_M.add((v9_s, v9__tup1))\n # Begin maint _m_Comp1_d_M_in after \"Comp1_d_M.add((v9_s, v9__tup1))\"\n _maint__m_Comp1_d_M_in_add((v9_s, v9__tup1))\n # End maint _m_Comp1_d_M_in after \"Comp1_d_M.add((v9_s, v9__tup1))\"\n # Begin maint Comp1_T_tup1 after \"Comp1_d_M.add((v9_s, v9__tup1))\"\n _maint_Comp1_T_tup1_Comp1_d_M_add((v9_s, v9__tup1))\n # End maint Comp1_T_tup1 after \"Comp1_d_M.add((v9_s, v9__tup1))\"\n\ndef _maint_Comp1_d_M_Comp1_Ts_remove(_e):\n # Iterate {(v10_s, v10__tup1) : v10_s in deltamatch(Comp1_Ts, 'b', _e, 1), (v10_s, v10__tup1) in _M}\n v10_s = _e\n if isinstance(v10_s, Set):\n for v10__tup1 in v10_s:\n # Begin maint Comp1_T_tup1 before \"Comp1_d_M.remove((v10_s, v10__tup1))\"\n _maint_Comp1_T_tup1_Comp1_d_M_remove((v10_s, v10__tup1))\n # End maint Comp1_T_tup1 before \"Comp1_d_M.remove((v10_s, v10__tup1))\"\n # Begin maint _m_Comp1_d_M_in before \"Comp1_d_M.remove((v10_s, v10__tup1))\"\n _maint__m_Comp1_d_M_in_remove((v10_s, v10__tup1))\n # End maint _m_Comp1_d_M_in before \"Comp1_d_M.remove((v10_s, v10__tup1))\"\n Comp1_d_M.remove((v10_s, v10__tup1))\n\ndef _maint_Comp1_d_M__M_add(_e):\n # Iterate {(v11_s, v11__tup1) : v11_s in Comp1_Ts, (v11_s, v11__tup1) in deltamatch(_M, 'bb', _e, 1)}\n (v11_s, v11__tup1) = _e\n if (v11_s in Comp1_Ts):\n Comp1_d_M.add((v11_s, v11__tup1))\n # Begin maint _m_Comp1_d_M_in after \"Comp1_d_M.add((v11_s, v11__tup1))\"\n _maint__m_Comp1_d_M_in_add((v11_s, v11__tup1))\n # End maint _m_Comp1_d_M_in after \"Comp1_d_M.add((v11_s, v11__tup1))\"\n # Begin maint Comp1_T_tup1 after \"Comp1_d_M.add((v11_s, v11__tup1))\"\n _maint_Comp1_T_tup1_Comp1_d_M_add((v11_s, v11__tup1))\n # End maint Comp1_T_tup1 after \"Comp1_d_M.add((v11_s, v11__tup1))\"\n\nComp1_Ts = RCSet()\ndef _maint_Comp1_Ts__U_Comp1_add(_e):\n # Iterate {v7_s : v7_s in deltamatch(_U_Comp1, 'b', _e, 1)}\n v7_s = _e\n Comp1_Ts.add(v7_s)\n # Begin maint Comp1_d_M after \"Comp1_Ts.add(v7_s)\"\n _maint_Comp1_d_M_Comp1_Ts_add(v7_s)\n # End maint Comp1_d_M after \"Comp1_Ts.add(v7_s)\"\n\ndef _maint_Comp1_Ts__U_Comp1_remove(_e):\n # Iterate {v8_s : v8_s in deltamatch(_U_Comp1, 'b', _e, 1)}\n v8_s = _e\n # Begin maint Comp1_d_M before \"Comp1_Ts.remove(v8_s)\"\n _maint_Comp1_d_M_Comp1_Ts_remove(v8_s)\n # End maint Comp1_d_M before \"Comp1_Ts.remove(v8_s)\"\n Comp1_Ts.remove(v8_s)\n\nComp1 = RCSet()\ndef _maint_Comp1__U_Comp1_add(_e):\n # Iterate {(v1_s, v1__tup1, v1_a) : v1_s in deltamatch(_U_Comp1, 'b', _e, 1), (v1_s, v1__tup1) in _M, (v1__tup1, v1_a, _) in _TUP2, (v1_a > 1)}\n v1_s = _e\n if isinstance(v1_s, Set):\n for v1__tup1 in v1_s:\n if (isinstance(v1__tup1, tuple) and (len(v1__tup1) == 2)):\n for v1_a in setmatch({(v1__tup1, v1__tup1[0], v1__tup1[1])}, 'buw', v1__tup1):\n if (v1_a > 1):\n if ((v1_s, v1_a) not in Comp1):\n Comp1.add((v1_s, v1_a))\n # Begin maint _m_Comp1_out after \"Comp1.add((v1_s, v1_a))\"\n _maint__m_Comp1_out_add((v1_s, v1_a))\n # End maint _m_Comp1_out after \"Comp1.add((v1_s, v1_a))\"\n else:\n Comp1.incref((v1_s, v1_a))\n\ndef _maint_Comp1__U_Comp1_remove(_e):\n # Iterate {(v2_s, v2__tup1, v2_a) : v2_s in deltamatch(_U_Comp1, 'b', _e, 1), (v2_s, v2__tup1) in _M, (v2__tup1, v2_a, _) in _TUP2, (v2_a > 1)}\n v2_s = _e\n if isinstance(v2_s, Set):\n for v2__tup1 in v2_s:\n if (isinstance(v2__tup1, tuple) and (len(v2__tup1) == 2)):\n for v2_a in setmatch({(v2__tup1, v2__tup1[0], v2__tup1[1])}, 'buw', v2__tup1):\n if (v2_a > 1):\n if (Comp1.getref((v2_s, v2_a)) == 
1):\n # Begin maint _m_Comp1_out before \"Comp1.remove((v2_s, v2_a))\"\n _maint__m_Comp1_out_remove((v2_s, v2_a))\n # End maint _m_Comp1_out before \"Comp1.remove((v2_s, v2_a))\"\n Comp1.remove((v2_s, v2_a))\n else:\n Comp1.decref((v2_s, v2_a))\n\ndef _maint_Comp1__M_add(_e):\n # Iterate {(v3_s, v3__tup1, v3_a) : v3_s in _U_Comp1, (v3_s, v3__tup1) in deltamatch(Comp1_d_M, 'bb', _e, 1), (v3_s, v3__tup1) in Comp1_d_M, (v3__tup1, v3_a, _) in _TUP2, (v3_a > 1)}\n (v3_s, v3__tup1) = _e\n if (v3_s in _U_Comp1):\n if ((v3_s, v3__tup1) in Comp1_d_M):\n if (isinstance(v3__tup1, tuple) and (len(v3__tup1) == 2)):\n for v3_a in setmatch({(v3__tup1, v3__tup1[0], v3__tup1[1])}, 'buw', v3__tup1):\n if (v3_a > 1):\n if ((v3_s, v3_a) not in Comp1):\n Comp1.add((v3_s, v3_a))\n # Begin maint _m_Comp1_out after \"Comp1.add((v3_s, v3_a))\"\n _maint__m_Comp1_out_add((v3_s, v3_a))\n # End maint _m_Comp1_out after \"Comp1.add((v3_s, v3_a))\"\n else:\n Comp1.incref((v3_s, v3_a))\n\n_U_Comp1 = RCSet()\n_UEXT_Comp1 = Set()\ndef demand_Comp1(s):\n '{(s, a) : s in _U_Comp1, (s, _tup1) in _M, (_tup1, a, _) in _TUP2, (a > 1)}'\n if (s not in _U_Comp1):\n _U_Comp1.add(s)\n # Begin maint Comp1_Ts after \"_U_Comp1.add(s)\"\n _maint_Comp1_Ts__U_Comp1_add(s)\n # End maint Comp1_Ts after \"_U_Comp1.add(s)\"\n # Begin maint Comp1 after \"_U_Comp1.add(s)\"\n _maint_Comp1__U_Comp1_add(s)\n # End maint Comp1 after \"_U_Comp1.add(s)\"\n else:\n _U_Comp1.incref(s)\n\ndef undemand_Comp1(s):\n '{(s, a) : s in _U_Comp1, (s, _tup1) in _M, (_tup1, a, _) in _TUP2, (a > 1)}'\n if (_U_Comp1.getref(s) == 1):\n # Begin maint Comp1 before \"_U_Comp1.remove(s)\"\n _maint_Comp1__U_Comp1_remove(s)\n # End maint Comp1 before \"_U_Comp1.remove(s)\"\n # Begin maint Comp1_Ts before \"_U_Comp1.remove(s)\"\n _maint_Comp1_Ts__U_Comp1_remove(s)\n # End maint Comp1_Ts before \"_U_Comp1.remove(s)\"\n _U_Comp1.remove(s)\n else:\n _U_Comp1.decref(s)\n\ndef query_Comp1(s):\n '{(s, a) : s in _U_Comp1, (s, _tup1) in _M, (_tup1, a, _) in _TUP2, (a > 1)}'\n if (s not in _UEXT_Comp1):\n _UEXT_Comp1.add(s)\n demand_Comp1(s)\n return True\n\ns = Set()\nfor (x, y) in [(1, 2), (2, 3), (3, 4)]:\n s.add((x, y))\n # Begin maint Comp1_d_M after \"_M.add((s, (x, y)))\"\n _maint_Comp1_d_M__M_add((s, (x, y)))\n # End maint Comp1_d_M after \"_M.add((s, (x, y)))\"\n # Begin maint Comp1 after \"_M.add((s, (x, y)))\"\n _maint_Comp1__M_add((s, (x, y)))\n # End maint Comp1 after \"_M.add((s, (x, y)))\"\nprint(sorted((query_Comp1(s) and (_m_Comp1_out[s] if (s in _m_Comp1_out) else set()))))" }, { "alpha_fraction": 0.4629654586315155, "alphanum_fraction": 0.4641841650009155, "avg_line_length": 41.442527770996094, "blob_id": "0c02fddc2a8c49610f39ba85df9d5589543f1587", "content_id": "1c3bf4ed83203233797c29096d7d4be2f36c0e28", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7385, "license_type": "no_license", "max_line_length": 77, "num_lines": 174, "path": "/incoq/tests/invinc/cost/test_cost.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Unit tests for cost.py.\"\"\"\n\n\nimport unittest\n\nfrom incoq.compiler.set import Mask\nfrom incoq.compiler.cost.cost import *\nfrom incoq.compiler.cost.cost import (\n without_duplicates, all_products_dominated,\n all_sums_of_products_dominate,\n simplify_sum_of_products, simplify_min_of_sums,\n multiply_sums_of_products)\n\n\nclass CostCase(unittest.TestCase):\n \n def test_cost(self):\n cost = SumCost.from_sums([SumCost((NameCost('a'), NameCost('b'))),\n 
SumCost((NameCost('c'),))])\n exp_cost = SumCost((NameCost('a'), NameCost('b'), NameCost('c')))\n self.assertEqual(cost, exp_cost)\n \n def test_visitor(self):\n class Foo(CostTransformer):\n def visit_NameCost(self, cost):\n return NameCost('z')\n \n cost = SumCost((NameCost('a'), NameCost('b')))\n cost = Foo.run(cost)\n exp_cost = SumCost((NameCost('z'), NameCost('z')))\n self.assertEqual(cost, exp_cost)\n \n def test_substitute(self):\n cost = SumCost((NameCost('a'), DefImgsetCost('R', Mask.OUT, ('b',))))\n subst = {NameCost('a'): NameCost('x'),\n IndefImgsetCost('R', Mask.OUT): NameCost('y')}\n \n cost = CostSubstitutor.run(cost, subst, subsume_maps=False)\n exp_cost = SumCost((NameCost('x'),\n DefImgsetCost('R', Mask.OUT, ('b',))))\n self.assertEqual(cost, exp_cost)\n \n cost = CostSubstitutor.run(cost, subst, subsume_maps=True)\n exp_cost = SumCost((NameCost('x'), NameCost('y')))\n self.assertEqual(cost, exp_cost)\n \n def test_prettyprint(self):\n cost = SumCost((ProductCost((NameCost('a'), NameCost('b'),\n NameCost('a'), NameCost('b'),\n NameCost('b'), NameCost('c'),\n NameCost('a'))),\n ProductCost((UnknownCost(), UnitCost()))))\n output = PrettyPrinter.run(cost)\n exp_output = '((a^3*b^3*c) + (1*?))'\n self.assertEqual(output, exp_output)\n \n def test_simplifier(self):\n cost = SumCost((UnitCost(), NameCost('a'), NameCost('a')))\n cost = Simplifier.run(cost)\n exp_cost = NameCost('a')\n self.assertEqual(cost, exp_cost)\n \n def test_elim_duplicate(self):\n # a + a + b -> a + b\n cost = SumCost((NameCost('a'), NameCost('a'), NameCost('b')))\n cost = without_duplicates(cost)\n exp_cost = SumCost((NameCost('a'), NameCost('b')))\n self.assertEqual(cost, exp_cost)\n \n def test_products_dominated(self):\n pc, nc = ProductCost, NameCost\n # [a*a, a*b]\n right = [pc((nc('a'), nc('a'))),\n pc((nc('a'), nc('b')))]\n # [a*a, a]\n left1 = [pc((nc('a'), nc('a'))),\n pc((nc('a'),))]\n # [a, b, c]\n left2 = [pc((nc('a'),)),\n pc((nc('b'),)),\n pc((nc('c'),))]\n \n self.assertTrue(all_products_dominated(left1, right))\n self.assertFalse(all_products_dominated(left2, right))\n \n self.assertTrue(all_products_dominated(\n [pc((UnitCost(),))], [pc((NameCost('a'),))]))\n \n def test_sums_dominate(self):\n sc, pc, nc = SumCost, ProductCost, NameCost\n # [a*a*a*b + a*b*b, a*b*b*b*b]\n left = [sc((pc((nc('a'), nc('a'), nc('a'), nc('b'))),\n pc((nc('a'), nc('b'), nc('b'))))),\n sc((pc((nc('a'), nc('b'), nc('b'), nc('b'), nc('b'))),))]\n # [a*a*b + a*b*b, a*b*b*b + 1]\n right = [sc((pc((nc('a'), nc('a'), nc('b'))),\n pc((nc('a'), nc('b'), nc('b'))))),\n sc((pc((nc('a'), nc('b'), nc('b'), nc('b'))),\n pc((UnitCost(),))))]\n \n self.assertTrue(all_sums_of_products_dominate(left, right))\n self.assertFalse(all_sums_of_products_dominate(right, left))\n \n def test_simplify_sum_of_products(self):\n # a*a + a*a + a + a*b -> a*a + a*b\n cost = SumCost((ProductCost((NameCost('a'), NameCost('a'))),\n ProductCost((NameCost('a'), NameCost('a'))),\n ProductCost((NameCost('a'),)),\n ProductCost((NameCost('a'), NameCost('b')))))\n cost = simplify_sum_of_products(cost)\n exp_cost = SumCost((ProductCost((NameCost('a'), NameCost('a'))),\n ProductCost((NameCost('a'), NameCost('b')))))\n self.assertEqual(cost, exp_cost)\n \n # a*b + b*a -> a*b\n cost = SumCost((ProductCost((NameCost('a'), NameCost('b'))),\n ProductCost((NameCost('b'), NameCost('a')))))\n cost = simplify_sum_of_products(cost)\n exp_cost = SumCost((ProductCost((NameCost('a'), NameCost('b'))),))\n self.assertEqual(cost, exp_cost)\n \n def 
test_simplify_min_of_sums(self):\n # min(a + b, a*a*a, b)\n cost = MinCost((SumCost((ProductCost((NameCost('a'),)),\n ProductCost((NameCost('b'),)))),\n SumCost((ProductCost((NameCost('a'), NameCost('a'),\n NameCost('a'))),)),\n SumCost((ProductCost((NameCost('b'),)),))))\n cost = simplify_min_of_sums(cost)\n exp_cost_str = 'min(((a*a*a)), ((b)))'\n self.assertEqual(str(cost), exp_cost_str)\n \n def test_multiply_sums_of_products(self):\n # [(a + b), (c + d), (e + f)]\n costs = [SumCost((ProductCost((NameCost('a'),)),\n ProductCost((NameCost('b'),)))),\n SumCost((ProductCost((NameCost('c'),)),\n ProductCost((NameCost('d'),)))),\n SumCost((ProductCost((NameCost('e'),)),\n ProductCost((NameCost('f'),))))]\n cost = multiply_sums_of_products(costs)\n exp_cost_str = ('((a*c*e) + (a*c*f) + (a*d*e) + (a*d*f) + '\n '(b*c*e) + (b*c*f) + (b*d*e) + (b*d*f))')\n self.assertEqual(str(cost), exp_cost_str)\n \n def test_normalize(self):\n cost = ProductCost((SumCost((NameCost('a'), NameCost('b'))),\n SumCost((NameCost('c'), NameCost('d'))),\n SumCost((NameCost('e'), NameCost('f')))))\n cost = normalize(cost)\n exp_cost_str = ('((a*c*e) + (a*c*f) + (a*d*e) + (a*d*f) + '\n '(b*c*e) + (b*c*f) + (b*d*e) + (b*d*f))')\n self.assertEqual(str(cost), exp_cost_str)\n \n def test_lteq(self):\n sc, pc, nc = SumCost, ProductCost, NameCost\n # min(a*a*b + a*b*b, a*b*b*b + 1)\n left = [sc((pc((nc('a'), nc('a'), nc('b'))),\n pc((nc('a'), nc('b'), nc('b'))))),\n sc((pc((nc('a'), nc('b'), nc('b'), nc('b'))),\n pc((UnitCost(),))))]\n left = MinCost(left)\n # min(a*a*a*b + a*b*b, a*b*b*b*b)\n right = [sc((pc((nc('a'), nc('a'), nc('a'), nc('b'))),\n pc((nc('a'), nc('b'), nc('b'))))),\n sc((pc((nc('a'), nc('b'), nc('b'), nc('b'), nc('b'))),))]\n right = MinCost(right)\n \n self.assertTrue(lteq(left, right))\n self.assertFalse(lteq(right, left))\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.416769802570343, "alphanum_fraction": 0.4879331588745117, "avg_line_length": 35.7386360168457, "blob_id": "e0202b1886067071a4e7ec570d12a8564dc31c6f", "content_id": "172e1a692199322be94aae9b7ea7a13ca51302ea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3232, "license_type": "no_license", "max_line_length": 153, "num_lines": 88, "path": "/incoq/tests/programs/comp/deltawild_out.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from incoq.runtime import *\n# Comp1 := {(x, w) : (x, _, z) in S, (z, w) in T}\n_m_S_uwb = Map()\ndef _maint__m_S_uwb_add(_e):\n (v9_1, v9_2, v9_3) = _e\n if (v9_3 not in _m_S_uwb):\n _m_S_uwb[v9_3] = RCSet()\n if (v9_1 not in _m_S_uwb[v9_3]):\n _m_S_uwb[v9_3].add(v9_1)\n else:\n _m_S_uwb[v9_3].incref(v9_1)\n\n_m_T_out = Map()\ndef _maint__m_T_out_add(_e):\n (v7_1, v7_2) = _e\n if (v7_1 not in _m_T_out):\n _m_T_out[v7_1] = set()\n _m_T_out[v7_1].add(v7_2)\n\ndef _maint__m_T_out_remove(_e):\n (v8_1, v8_2) = _e\n _m_T_out[v8_1].remove(v8_2)\n if (len(_m_T_out[v8_1]) == 0):\n del _m_T_out[v8_1]\n\n_m_S_bwb = Map()\ndef _maint__m_S_bwb_add(_e):\n (v5_1, v5_2, v5_3) = _e\n if ((v5_1, v5_3) not in _m_S_bwb):\n _m_S_bwb[(v5_1, v5_3)] = RCSet()\n if (() not in _m_S_bwb[(v5_1, v5_3)]):\n _m_S_bwb[(v5_1, v5_3)].add(())\n else:\n _m_S_bwb[(v5_1, v5_3)].incref(())\n\nComp1 = RCSet()\ndef _maint_Comp1_S_add(_e):\n # Iterate {(v1_x, v1_z, v1_w) : (v1_x, _, v1_z) in deltamatch(S, 'bwb', _e, 1), (v1_z, v1_w) in T}\n for (v1_x, v1_z) in setmatch(({_e} if ((_m_S_bwb[(_e[0], _e[2])] if ((_e[0], _e[2]) in _m_S_bwb) else 
RCSet()).getref(()) == 1) else {}), 'uwu', ()):\n for v1_w in (_m_T_out[v1_z] if (v1_z in _m_T_out) else set()):\n if ((v1_x, v1_w) not in Comp1):\n Comp1.add((v1_x, v1_w))\n else:\n Comp1.incref((v1_x, v1_w))\n\ndef _maint_Comp1_T_add(_e):\n # Iterate {(v3_x, v3_z, v3_w) : (v3_x, _, v3_z) in S, (v3_z, v3_w) in deltamatch(T, 'bb', _e, 1)}\n (v3_z, v3_w) = _e\n for v3_x in (_m_S_uwb[v3_z] if (v3_z in _m_S_uwb) else RCSet()):\n if ((v3_x, v3_w) not in Comp1):\n Comp1.add((v3_x, v3_w))\n else:\n Comp1.incref((v3_x, v3_w))\n\ndef _maint_Comp1_T_remove(_e):\n # Iterate {(v4_x, v4_z, v4_w) : (v4_x, _, v4_z) in S, (v4_z, v4_w) in deltamatch(T, 'bb', _e, 1)}\n (v4_z, v4_w) = _e\n for v4_x in (_m_S_uwb[v4_z] if (v4_z in _m_S_uwb) else RCSet()):\n if (Comp1.getref((v4_x, v4_w)) == 1):\n Comp1.remove((v4_x, v4_w))\n else:\n Comp1.decref((v4_x, v4_w))\n\nfor (v1, v2) in [(2, 4), (3, 5)]:\n # Begin maint _m_T_out after \"T.add((v1, v2))\"\n _maint__m_T_out_add((v1, v2))\n # End maint _m_T_out after \"T.add((v1, v2))\"\n # Begin maint Comp1 after \"T.add((v1, v2))\"\n _maint_Comp1_T_add((v1, v2))\n # End maint Comp1 after \"T.add((v1, v2))\"\nfor (v1, v2, v3) in [(1, 1, 2), (1, 2, 2), (1, 2, 3)]:\n # Begin maint _m_S_uwb after \"S.add((v1, v2, v3))\"\n _maint__m_S_uwb_add((v1, v2, v3))\n # End maint _m_S_uwb after \"S.add((v1, v2, v3))\"\n # Begin maint _m_S_bwb after \"S.add((v1, v2, v3))\"\n _maint__m_S_bwb_add((v1, v2, v3))\n # End maint _m_S_bwb after \"S.add((v1, v2, v3))\"\n # Begin maint Comp1 after \"S.add((v1, v2, v3))\"\n _maint_Comp1_S_add((v1, v2, v3))\n # End maint Comp1 after \"S.add((v1, v2, v3))\"\nprint(sorted(Comp1))\n# Begin maint Comp1 before \"T.remove((2, 4))\"\n_maint_Comp1_T_remove((2, 4))\n# End maint Comp1 before \"T.remove((2, 4))\"\n# Begin maint _m_T_out before \"T.remove((2, 4))\"\n_maint__m_T_out_remove((2, 4))\n# End maint _m_T_out before \"T.remove((2, 4))\"\nprint(sorted(Comp1))" }, { "alpha_fraction": 0.44333910942077637, "alphanum_fraction": 0.5229238867759705, "avg_line_length": 37.223140716552734, "blob_id": "b6f7d81dee7ff75a09a425bc246dbb71b178bf5f", "content_id": "f625f6b4161a6258ae74ab0277e617b7140ab2c6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4624, "license_type": "no_license", "max_line_length": 106, "num_lines": 121, "path": "/incoq/tests/programs/deminc/nocheck_out.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from incoq.runtime import *\n# Comp1 := {(x, z) : (x, y) in E, (y, z) in E}\n# Comp1_Ty1 := {y : (x, y) in E}\n# Comp1_dE2 := {(y, z) : y in Comp1_Ty1, (y, z) in E}\n_m_Comp1_out = Map()\ndef _maint__m_Comp1_out_add(_e):\n (v15_1, v15_2) = _e\n if (v15_1 not in _m_Comp1_out):\n _m_Comp1_out[v15_1] = set()\n _m_Comp1_out[v15_1].add(v15_2)\n\ndef _maint__m_Comp1_out_remove(_e):\n (v16_1, v16_2) = _e\n _m_Comp1_out[v16_1].remove(v16_2)\n if (len(_m_Comp1_out[v16_1]) == 0):\n del _m_Comp1_out[v16_1]\n\n_m_E_in = Map()\ndef _maint__m_E_in_add(_e):\n (v13_1, v13_2) = _e\n if (v13_2 not in _m_E_in):\n _m_E_in[v13_2] = set()\n _m_E_in[v13_2].add(v13_1)\n\n_m_Comp1_dE2_out = Map()\ndef _maint__m_Comp1_dE2_out_add(_e):\n (v11_1, v11_2) = _e\n if (v11_1 not in _m_Comp1_dE2_out):\n _m_Comp1_dE2_out[v11_1] = set()\n _m_Comp1_dE2_out[v11_1].add(v11_2)\n\ndef _maint__m_Comp1_dE2_out_remove(_e):\n (v12_1, v12_2) = _e\n _m_Comp1_dE2_out[v12_1].remove(v12_2)\n if (len(_m_Comp1_dE2_out[v12_1]) == 0):\n del _m_Comp1_dE2_out[v12_1]\n\n_m_E_out = Map()\ndef _maint__m_E_out_add(_e):\n 
(v9_1, v9_2) = _e\n if (v9_1 not in _m_E_out):\n _m_E_out[v9_1] = set()\n _m_E_out[v9_1].add(v9_2)\n\ndef _maint_Comp1_dE2_Comp1_Ty1_add(_e):\n # Iterate {(v5_y, v5_z) : v5_y in deltamatch(Comp1_Ty1, 'b', _e, 1), (v5_y, v5_z) in E}\n v5_y = _e\n for v5_z in (_m_E_out[v5_y] if (v5_y in _m_E_out) else set()):\n # Begin maint _m_Comp1_dE2_out after \"Comp1_dE2.add((v5_y, v5_z))\"\n _maint__m_Comp1_dE2_out_add((v5_y, v5_z))\n # End maint _m_Comp1_dE2_out after \"Comp1_dE2.add((v5_y, v5_z))\"\n\ndef _maint_Comp1_dE2_Comp1_Ty1_remove(_e):\n # Iterate {(v6_y, v6_z) : v6_y in deltamatch(Comp1_Ty1, 'b', _e, 1), (v6_y, v6_z) in E}\n v6_y = _e\n for v6_z in (_m_E_out[v6_y] if (v6_y in _m_E_out) else set()):\n # Begin maint _m_Comp1_dE2_out before \"Comp1_dE2.remove((v6_y, v6_z))\"\n _maint__m_Comp1_dE2_out_remove((v6_y, v6_z))\n # End maint _m_Comp1_dE2_out before \"Comp1_dE2.remove((v6_y, v6_z))\"\n\ndef _maint_Comp1_dE2_E_add(_e):\n # Iterate {(v7_y, v7_z) : v7_y in Comp1_Ty1, (v7_y, v7_z) in deltamatch(E, 'bb', _e, 1)}\n (v7_y, v7_z) = _e\n if (v7_y in Comp1_Ty1):\n # Begin maint _m_Comp1_dE2_out after \"Comp1_dE2.add((v7_y, v7_z))\"\n _maint__m_Comp1_dE2_out_add((v7_y, v7_z))\n # End maint _m_Comp1_dE2_out after \"Comp1_dE2.add((v7_y, v7_z))\"\n\nComp1_Ty1 = RCSet()\ndef _maint_Comp1_Ty1_E_add(_e):\n # Iterate {(v3_x, v3_y) : (v3_x, v3_y) in deltamatch(E, 'bb', _e, 1)}\n (v3_x, v3_y) = _e\n if (v3_y not in Comp1_Ty1):\n Comp1_Ty1.add(v3_y)\n # Begin maint Comp1_dE2 after \"Comp1_Ty1.add(v3_y)\"\n _maint_Comp1_dE2_Comp1_Ty1_add(v3_y)\n # End maint Comp1_dE2 after \"Comp1_Ty1.add(v3_y)\"\n else:\n Comp1_Ty1.incref(v3_y)\n\nComp1 = RCSet()\ndef _maint_Comp1_E_add(_e):\n v1_DAS = set()\n # Iterate {(v1_x, v1_y, v1_z) : (v1_x, v1_y) in deltamatch(E, 'bb', _e, 1), (v1_y, v1_z) in Comp1_dE2}\n (v1_x, v1_y) = _e\n for v1_z in (_m_Comp1_dE2_out[v1_y] if (v1_y in _m_Comp1_dE2_out) else set()):\n if ((v1_x, v1_y, v1_z) not in v1_DAS):\n v1_DAS.add((v1_x, v1_y, v1_z))\n # Iterate {(v1_x, v1_y, v1_z) : (v1_x, v1_y) in E, (v1_y, v1_z) in deltamatch(E, 'bb', _e, 1)}\n (v1_y, v1_z) = _e\n for v1_x in (_m_E_in[v1_y] if (v1_y in _m_E_in) else set()):\n if ((v1_x, v1_y, v1_z) not in v1_DAS):\n v1_DAS.add((v1_x, v1_y, v1_z))\n for (v1_x, v1_y, v1_z) in v1_DAS:\n if ((v1_x, v1_z) not in Comp1):\n Comp1.add((v1_x, v1_z))\n # Begin maint _m_Comp1_out after \"Comp1.add((v1_x, v1_z))\"\n _maint__m_Comp1_out_add((v1_x, v1_z))\n # End maint _m_Comp1_out after \"Comp1.add((v1_x, v1_z))\"\n else:\n Comp1.incref((v1_x, v1_z))\n del v1_DAS\n\nfor (a, b) in {(1, 2), (2, 3), (2, 4)}:\n # Begin maint _m_E_in after \"E.add((a, b))\"\n _maint__m_E_in_add((a, b))\n # End maint _m_E_in after \"E.add((a, b))\"\n # Begin maint _m_E_out after \"E.add((a, b))\"\n _maint__m_E_out_add((a, b))\n # End maint _m_E_out after \"E.add((a, b))\"\n # Begin maint Comp1_dE2 after \"E.add((a, b))\"\n _maint_Comp1_dE2_E_add((a, b))\n # End maint Comp1_dE2 after \"E.add((a, b))\"\n # Begin maint Comp1_Ty1 after \"E.add((a, b))\"\n _maint_Comp1_Ty1_E_add((a, b))\n # End maint Comp1_Ty1 after \"E.add((a, b))\"\n # Begin maint Comp1 after \"E.add((a, b))\"\n _maint_Comp1_E_add((a, b))\n # End maint Comp1 after \"E.add((a, b))\"\nx = 1\nprint(sorted((_m_Comp1_out[x] if (x in _m_Comp1_out) else set())))" }, { "alpha_fraction": 0.7176470756530762, "alphanum_fraction": 0.7176470756530762, "avg_line_length": 17.88888931274414, "blob_id": "7ffeee23ac62e67f798a8325349b62991d763bb8", "content_id": "032f7e0989bb8cdf3b683d8f6fec49056a40efaf", "detected_licenses": 
[], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 170, "license_type": "no_license", "max_line_length": 37, "num_lines": 9, "path": "/incoq/compiler/obj/__init__.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Support for object-set queries.\"\"\"\n\n\n# Exports.\nfrom .pairrel import *\nfrom .match import *\nfrom .objclause import *\nfrom .objcomp import *\nfrom .domaintrans import *\n" }, { "alpha_fraction": 0.547432005405426, "alphanum_fraction": 0.5480966567993164, "avg_line_length": 34.063560485839844, "blob_id": "0caa903ddba8cc20564bb023418ba7f1af4919b1", "content_id": "ff21784099c540346a484f5fad90eaf60c6afd71", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16550, "license_type": "no_license", "max_line_length": 78, "num_lines": 472, "path": "/incoq/compiler/incast/nodeconv.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Macro processors for converting between IncAST nodes and their\nPyAST equivalent operations.\n\"\"\"\n\n\n__all__ = [\n 'OptionsRewriter',\n 'IncLangImporter',\n 'IncLangExporter',\n 'comp_to_setcomp',\n]\n\n\nfrom numbers import Number\n\nfrom incoq.util.collections import make_frozen, frozendict\n\nfrom .nodes import *\nfrom .structconv import (NodeTransformer, parse_structast, MacroProcessor,\n PatternTransformer, astargs, literal_eval)\n\n\ndef frozen_eval(tree):\n return make_frozen(literal_eval(tree))\n\ndef frozen_eval_dict(d):\n # As above but the keys are strings instead of ASTs.\n return {k: frozen_eval(v) for k, v in d.items()}\n\ndef value_to_ast(value):\n \"\"\"Turn a structure of python values into an AST for a literal\n expression. Valid values include dictionaries, sets, lists,\n tuples, bools, numbers, strings, and None. ASTs are created with\n Load context. 
Set and dictionary contents are sorted.\n    \"\"\"\n    if isinstance(value, (dict, frozendict)):\n        if len(value) == 0:\n            return Dict((), ())\n        keys, vals = zip(*sorted(value.items()))\n        keys = tuple(value_to_ast(k) for k in keys)\n        vals = tuple(value_to_ast(v) for v in vals)\n        return Dict(keys, vals)\n    \n    elif isinstance(value, (set, frozenset)):\n        items = tuple(sorted(value_to_ast(i) for i in value))\n        return Set(items)\n    elif isinstance(value, list):\n        items = tuple(value_to_ast(i) for i in value)\n        return List(items, Load())\n    elif isinstance(value, tuple):\n        items = tuple(value_to_ast(i) for i in value)\n        return Tuple(items, Load())\n    \n    elif isinstance(value, bool):\n        return NameConstant(value)\n    elif isinstance(value, Number):\n        return Num(value)\n    elif isinstance(value, str):\n        return Str(value)\n    \n    elif isinstance(value, type(None)):\n        return NameConstant(None)\n    \n    else:\n        raise TypeError('Can\\'t convert value to AST: ' + repr(value))\n\n\nclass OptionsRewriter(PatternTransformer):\n    \n    \"\"\"Rewrite incoq.runtime.OPTIONS and incoq.runtime.QUERYOPTIONS\n    calls to eliminate the incoq.runtime qualifier.\n    \"\"\"\n    \n    rules = [(parse_structast('incoq.runtime.OPTIONS', mode='expr'),\n              lambda **mapping: parse_structast('OPTIONS', mode='expr')),\n             (parse_structast('incoq.runtime.QUERYOPTIONS', mode='expr'),\n              lambda **mapping: parse_structast('QUERYOPTIONS', mode='expr'))]\n\nclass IncLangImporter(MacroProcessor):\n    \n    \"\"\"Expand PyAST patterns that encode IncAST nodes.\"\"\"\n    \n    # Misc operations.\n    \n    @astargs\n    def handle_fs_Comment(self, f, text:'Str'):\n        return Comment(text)\n    \n    @astargs\n    def handle_fs_OPTIONS(self, f, **opts):\n        return NOptions(frozen_eval_dict(opts))\n    \n    @astargs\n    def handle_fs_QUERYOPTIONS(self, f, query:'Str', **opts):\n        return QOptions(query, frozen_eval_dict(opts))\n    \n    @astargs\n    def handle_fw_MAINT(self, f, name:'Name', when:'Str', desc:'Str', _body):\n        _body = self.visit(_body)\n        if when == 'before':\n            precode = _body[:-1]\n            update = _body[-1:]\n            postcode = ()\n        elif when == 'after':\n            precode = ()\n            update = _body[0:1]\n            postcode = _body[1:]\n        else:\n            assert()\n        \n        return Maintenance(name, desc, precode, update, postcode)\n    \n    # Allows specifying params and options for a Comp.\n    @astargs\n    def handle_fe_COMP(self, f, comp, params=None, options=None):\n        comp = self.generic_visit(comp)\n        \n        # TODO: refactor this arg processing into astargs.\n        \n        if params is not None:\n            if isinstance(params, NameConstant) and params.value is None:\n                params = None\n            else:\n                if not (isinstance(params, (List, Tuple)) and\n                        all(isinstance(e, Name) for e in params.elts)):\n                    raise TypeError('Expected list of identifiers')\n                params = tuple(p.id for p in params.elts)\n        \n        if options is not None:\n            if isinstance(options, NameConstant) and options.value is None:\n                options = None\n            else:\n                options = frozen_eval(options)\n        \n        comp = comp._replace(params=params, options=options)\n        return comp\n    \n    @astargs\n    def handle_fe_DEMQUERY(self, f, demname:'Name', args:'List', value):\n        if isinstance(value, NameConstant) and value.value is None:\n            value = None\n        return DemQuery(demname, args, value)\n    \n    def handle_fe_NODEMQUERY(self, f, value):\n        return NoDemQuery(value)\n    \n    handle_fe_NODEMAND = handle_fe_NODEMQUERY\n    \n    # Set operations.\n    \n    def handle_ms_add(self, f, target, elem):\n        return SetUpdate(target, 'add', elem)\n    \n    def handle_ms_remove(self, f, target, elem):\n        return SetUpdate(target, 'remove', elem)\n    \n    def handle_ms_update(self, f, target, other):\n        return 
MacroUpdate(target, 'union', other)\n    \n    def handle_ms_intersection_update(self, f, target, other):\n        return MacroUpdate(target, 'inter', other)\n    \n    def handle_ms_difference_update(self, f, target, other):\n        return MacroUpdate(target, 'diff', other)\n    \n    def handle_ms_symmetric_difference_update(self, f, target, other):\n        return MacroUpdate(target, 'symdiff', other)\n    \n    def handle_ms_assign_update(self, f, target, other):\n        return MacroUpdate(target, 'assign', other)\n    \n    def handle_ms_clear(self, f, target):\n        return MacroUpdate(target, 'clear', None)\n    \n    def handle_ms_mapassign_update(self, f, target, other):\n        return MacroUpdate(target, 'mapassign', other)\n    \n    def handle_ms_mapclear(self, f, target):\n        return MacroUpdate(target, 'mapclear', None)\n    \n    def handle_ms_incref(self, f, target, elem):\n        return RCSetRefUpdate(target, 'incref', elem)\n    \n    def handle_ms_decref(self, f, target, elem):\n        return RCSetRefUpdate(target, 'decref', elem)\n    \n    def handle_me_isempty(self, f, target):\n        return IsEmpty(target)\n    \n    def handle_me_getref(self, f, target, elem):\n        return GetRef(target, elem)\n    \n    # Map operations.\n    \n    def handle_ms_assignkey(self, f, target, key, value):\n        return AssignKey(target, key, value)\n    \n    def handle_ms_delkey(self, f, target, key):\n        return DelKey(target, key)\n    \n    def handle_me_lookup(self, f, target, key):\n        return Lookup(target, key, None)\n    \n    def handle_me_deflookup(self, f, target, key, default):\n        return Lookup(target, key, default)\n    \n    def handle_me_imglookup(self, f, target, key):\n        return ImgLookup(target, key)\n    \n    def handle_me_rcimglookup(self, f, target, key):\n        return RCImgLookup(target, key)\n    \n    # Setmap operations.\n    \n    @astargs\n    def handle_me_smlookup(self, f, target, mask:'Str', key):\n        return SMLookup(target, mask, key, None)\n    \n    @astargs\n    def handle_me_smdeflookup(self, f, target, mask:'Str', key, default):\n        return SMLookup(target, mask, key, default)\n    \n    # Query operations.\n    \n    @astargs\n    def handle_fe_setmatch(self, f, target, mask:'Str', key):\n        return SetMatch(target, mask, key)\n    \n    @astargs\n    def handle_fe_deltamatch(self, f, target, mask:'Str', elem, limit:'Num'):\n        return DeltaMatch(target, mask, elem, limit)\n    \n    def visit_SetComp(self, node):\n        node = self.generic_visit(node)\n        \n        # Turn a SetComp's generators into a list of\n        # Enumerator and expression nodes.\n        clauses = []\n        for gen in node.generators:\n            ifs = gen.ifs\n            enum = Enumerator(gen.target, gen.iter)\n            clauses.append(enum)\n            clauses.extend(ifs)\n        \n        return Comp(node.elt, tuple(clauses), None, None)\n    \n    @astargs\n    def aggr_helper(self, f, value, options=None):\n        assert f in ['count', 'sum', 'min', 'max'], \\\n            'Unknown aggregate \"{}\"'.format(f)\n        \n        if options is not None:\n            if isinstance(options, NameConstant) and options.value is None:\n                options = None\n            else:\n                options = frozen_eval(options)\n        \n        return Aggregate(value, f, options)\n    \n    handle_fe_count = aggr_helper\n    handle_fe_sum = aggr_helper\n    handle_fe_min = aggr_helper\n    handle_fe_max = aggr_helper\n\n\nclass IncLangExporter(NodeTransformer):\n    \n    \"\"\"Export IncAST-specific nodes to PyAST format.\n    \n    Some information is lost. This includes options dictionaries\n    and query parameter info. 
The result is not necessarily\n round-trippable.\n \"\"\"\n \n def pc(self, source, subst=None):\n return parse_structast(source, mode='code', subst=subst)\n \n def pe(self, source, subst=None):\n return parse_structast(source, mode='expr', subst=subst)\n \n def visit_NOptions(self, node):\n return self.pc('OPTIONS(...)')\n \n def visit_QOptions(self, node):\n return self.pc('QUERYOPTIONS(QSTR, ...)',\n subst={'QSTR': node.query})\n \n def visit_Maintenance(self, node):\n node = self.generic_visit(node)\n \n precode = node.precode\n if len(precode) > 0:\n precode = precode + (Comment('^-- Precode'),)\n postcode = node.postcode\n if len(postcode) > 0:\n postcode = (Comment('Postcode --v'),) + postcode\n \n return self.pc('''\n with MAINT(NAME, DESC):\n PRECODE\n UPDATE\n POSTCODE\n ''', subst={'NAME': Name(node.name, Load()),\n 'DESC': Str(node.desc),\n '<c>PRECODE': precode,\n '<c>UPDATE': node.update,\n '<c>POSTCODE': postcode})\n \n def set_helper(self, node):\n node = self.generic_visit(node)\n return self.pc('TARGET.OP(ELEM)',\n subst={'TARGET': node.target,\n '@OP': node.op,\n 'ELEM': node.elem})\n \n visit_SetUpdate = set_helper\n visit_RCSetRefUpdate = set_helper\n \n def visit_MacroUpdate(self, node):\n op = {'union': 'update',\n 'inter': 'intersection_update',\n 'diff': 'difference_update',\n 'symdiff': 'symmetric_difference_update',\n 'assign': 'assign_update',\n 'clear': 'clear',\n 'mapassign': 'mapassign_update',\n 'mapclear': 'mapclear'}[node.op]\n if op == 'clear':\n return self.pc('TARGET.clear()',\n subst={'TARGET': node.target})\n else:\n return self.pc('TARGET.OP(OTHER)',\n subst={'TARGET': node.target,\n '@OP': op,\n 'OTHER': node.other})\n \n def visit_AssignKey(self, node):\n node = self.generic_visit(node)\n return self.pc('TARGET.assignkey(KEY, VALUE)',\n subst={'TARGET': node.target,\n 'KEY': node.key,\n 'VALUE': node.value})\n \n def visit_DelKey(self, node):\n node = self.generic_visit(node)\n return self.pc('TARGET.delkey(KEY)',\n subst={'TARGET': node.target,\n 'KEY': node.key})\n \n def visit_IsEmpty(self, node):\n node = self.generic_visit(node)\n return self.pe('TARGET.isempty()',\n subst={'TARGET': node.target})\n \n def visit_GetRef(self, node):\n node = self.generic_visit(node)\n return self.pe('TARGET.getref(ELEM)',\n subst={'TARGET': node.target,\n 'ELEM': node.elem})\n \n def visit_Lookup(self, node):\n node = self.generic_visit(node)\n default = (node.default if node.default is not None\n else NameConstant(None))\n return self.pe('TARGET.lookup(KEY, DEFAULT)',\n subst={'TARGET': node.target,\n 'KEY': node.key,\n 'DEFAULT': default})\n \n def visit_ImgLookup(self, node):\n node = self.generic_visit(node)\n return self.pe('TARGET.imglookup(KEY)',\n subst={'TARGET': node.target,\n 'KEY': node.key})\n \n def visit_RCImgLookup(self, node):\n node = self.generic_visit(node)\n return self.pe('TARGET.rcimglookup(KEY)',\n subst={'TARGET': node.target,\n 'KEY': node.key})\n \n def visit_SMLookup(self, node):\n node = self.generic_visit(node)\n default = (node.default if node.default is not None\n else NameConstant(None))\n return self.pe('TARGET.smlookup(MASK, KEY, DEFAULT)',\n subst={'TARGET': node.target,\n 'MASK': Str(node.mask),\n 'KEY': node.key,\n 'DEFAULT': default})\n \n def visit_DemQuery(self, node):\n node = self.generic_visit(node)\n args_list = List(node.args, Load())\n return self.pe('DEMQUERY(DEMNAME, ARGS, VALUE)',\n subst={'DEMNAME': Name(node.demname, Load()),\n 'ARGS': args_list,\n 'VALUE': node.value})\n \n def visit_NoDemQuery(self, node):\n 
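# NoDemQuery wraps a query value to be evaluated without registering\n        # demand; it round-trips with handle_fe_NODEMQUERY in IncLangImporter.\n        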
node = self.generic_visit(node)\n return self.pe('NODEMQUERY(VALUE)',\n subst={'VALUE': node.value})\n \n def visit_SetMatch(self, node):\n node = self.generic_visit(node)\n return self.pe('setmatch(TARGET, MASK, KEY)',\n subst={'TARGET': node.target,\n 'MASK': Str(node.mask),\n 'KEY': node.key})\n \n def visit_DeltaMatch(self, node):\n node = self.generic_visit(node)\n return self.pe('deltamatch(TARGET, MASK, ELEM, LIMIT)',\n subst={'TARGET': node.target,\n 'MASK': Str(node.mask),\n 'ELEM': node.elem,\n 'LIMIT': Num(node.limit)})\n \n def visit_Enumerator(self, node):\n # Enumerators are converted by comp_to_setcomp() inside\n # visit_Comp(). Nonetheless, we still need to handle them\n # in this visitor in order to transform other nested\n # expressions, and to be able to print source for\n # Enumerator nodes by themselves.\n node = self.generic_visit(node)\n return comprehension(node.target, node.iter, ())\n \n def visit_Comp(self, node):\n node = self.generic_visit(node)\n setcomp = comp_to_setcomp(node)\n if node.params is None:\n paramslist = NameConstant(None)\n else:\n paramslist = List(tuple(Name(p, Load())\n for p in node.params), Load())\n opts = value_to_ast(node.options)\n result = self.pe('COMP(SETCOMP, PARAMS, OPTS)',\n subst={'SETCOMP': setcomp,\n 'PARAMS': paramslist,\n 'OPTS': opts})\n result = result._replace(type=node.type)\n return result\n \n def visit_Aggregate(self, node):\n node = self.generic_visit(node)\n opts = value_to_ast(node.options)\n return self.pe('OP(VALUE, OPTS)',\n subst={'OP': node.op,\n 'VALUE': node.value,\n 'OPTS': opts})\n\n\ndef comp_to_setcomp(node):\n \"\"\"Convert a Comp node to a SetComp. The generators may either\n be \"comprehension\" or Enumerator nodes.\n \"\"\"\n generators = []\n for clause in node.clauses:\n if isinstance(clause, Enumerator):\n gen = comprehension(clause.target, clause.iter, ())\n generators.append(gen)\n elif isinstance(clause, comprehension):\n assert len(clause.ifs) == 0\n generators.append(clause)\n elif isinstance(clause, expr):\n last = generators[-1]\n last = last._replace(ifs=last.ifs + (clause,))\n generators[-1] = last\n else:\n assert()\n return SetComp(node.resexp, tuple(generators))\n" }, { "alpha_fraction": 0.5224359035491943, "alphanum_fraction": 0.5480769276618958, "avg_line_length": 15.421052932739258, "blob_id": "c47dc1961f487866d2d20b5fc1dbccf15abcd3b5", "content_id": "09d50e1627452422e0e86f9def6578f8782e9e56", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 312, "license_type": "no_license", "max_line_length": 63, "num_lines": 19, "path": "/incoq/tests/programs/deminc/tup/obj_in.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "# Comprehensions with tuples on enum LHS, in the object domain.\n\nfrom incoq.runtime import *\n\nOPTIONS(\n obj_domain = True,\n)\n\nQUERYOPTIONS(\n '{a for (a, b) in s if a > 1}',\n impl = 'dem',\n)\n\ns = Set()\n\nfor x, y in [(1, 2), (2, 3), (3, 4)]:\n s.add((x, y))\n\nprint(sorted({a for (a, b) in s if a > 1}))\n" }, { "alpha_fraction": 0.6127508878707886, "alphanum_fraction": 0.6127508878707886, "avg_line_length": 20.174999237060547, "blob_id": "00d5c3cabea85dcb953f58281d9772368a97c730", "content_id": "b5bf1aa8faa5e12d7544cfbb318e9dd4f457bc6b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 847, "license_type": "no_license", "max_line_length": 69, "num_lines": 40, "path": "/experiments/wifi/wifi_in.py", "repo_name": "InvInc/invinc", 
"src_encoding": "UTF-8", "text": "# Wifi query from Tom's thesis.\n#\n# wifi -> {ap.ssid : ap in wifi.scan, ap.strength > wifi.threshold}\n\nfrom incoq.runtime import *\n\nOPTIONS(\n obj_domain = True,\n)\n\nQUERYOPTIONS(\n '{ap.ssid for ap in wifi.scan if ap.strength > wifi.threshold}',\n uset_mode = 'all',\n)\n\ndef make_wifi(threshold):\n wifi = Obj()\n wifi.scan = Set()\n wifi.threshold = threshold\n return wifi\n\ndef make_ap(ssid, strength):\n ap = Obj()\n ap.ssid = ssid\n ap.strength = strength\n return ap\n\ndef add_ap(wifi, ap):\n wifi.scan.add(ap)\n\ndef remove_ap(wifi, ap):\n wifi.scan.remove(ap)\n\ndef do_query(wifi):\n return {ap.ssid for ap in wifi.scan\n if ap.strength > wifi.threshold}\n\ndef do_query_nodemand(wifi):\n return NODEMAND({ap.ssid for ap in wifi.scan\n if ap.strength > wifi.threshold})\n" }, { "alpha_fraction": 0.41581064462661743, "alphanum_fraction": 0.48861098289489746, "avg_line_length": 34, "blob_id": "20ecebeaf5ab597865eb10efbfae769b58db67f5", "content_id": "ceadff033ec00f506c7f0f40a072acd29850f50c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2239, "license_type": "no_license", "max_line_length": 98, "num_lines": 64, "path": "/incoq/tests/programs/comp/setmatchcomp_out.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from incoq.runtime import *\n# Comp1 := {(x, z) : (x, y) in E, (y, z) in E}\n_m_Comp1_out = Map()\ndef _maint__m_Comp1_out_add(_e):\n (v7_1, v7_2) = _e\n if (v7_1 not in _m_Comp1_out):\n _m_Comp1_out[v7_1] = set()\n _m_Comp1_out[v7_1].add(v7_2)\n\ndef _maint__m_Comp1_out_remove(_e):\n (v8_1, v8_2) = _e\n _m_Comp1_out[v8_1].remove(v8_2)\n if (len(_m_Comp1_out[v8_1]) == 0):\n del _m_Comp1_out[v8_1]\n\n_m_E_in = Map()\ndef _maint__m_E_in_add(_e):\n (v5_1, v5_2) = _e\n if (v5_2 not in _m_E_in):\n _m_E_in[v5_2] = set()\n _m_E_in[v5_2].add(v5_1)\n\n_m_E_out = Map()\ndef _maint__m_E_out_add(_e):\n (v3_1, v3_2) = _e\n if (v3_1 not in _m_E_out):\n _m_E_out[v3_1] = set()\n _m_E_out[v3_1].add(v3_2)\n\nComp1 = RCSet()\ndef _maint_Comp1_E_add(_e):\n v1_DAS = set()\n # Iterate {(v1_x, v1_y, v1_z) : (v1_x, v1_y) in deltamatch(E, 'bb', _e, 1), (v1_y, v1_z) in E}\n (v1_x, v1_y) = _e\n for v1_z in (_m_E_out[v1_y] if (v1_y in _m_E_out) else set()):\n if ((v1_x, v1_y, v1_z) not in v1_DAS):\n v1_DAS.add((v1_x, v1_y, v1_z))\n # Iterate {(v1_x, v1_y, v1_z) : (v1_x, v1_y) in E, (v1_y, v1_z) in deltamatch(E, 'bb', _e, 1)}\n (v1_y, v1_z) = _e\n for v1_x in (_m_E_in[v1_y] if (v1_y in _m_E_in) else set()):\n if ((v1_x, v1_y, v1_z) not in v1_DAS):\n v1_DAS.add((v1_x, v1_y, v1_z))\n for (v1_x, v1_y, v1_z) in v1_DAS:\n if ((v1_x, v1_z) not in Comp1):\n Comp1.add((v1_x, v1_z))\n # Begin maint _m_Comp1_out after \"Comp1.add((v1_x, v1_z))\"\n _maint__m_Comp1_out_add((v1_x, v1_z))\n # End maint _m_Comp1_out after \"Comp1.add((v1_x, v1_z))\"\n else:\n Comp1.incref((v1_x, v1_z))\n del v1_DAS\n\nfor (v1, v2) in {(1, 2), (1, 3), (2, 3), (3, 4)}:\n # Begin maint _m_E_in after \"E.add((v1, v2))\"\n _maint__m_E_in_add((v1, v2))\n # End maint _m_E_in after \"E.add((v1, v2))\"\n # Begin maint _m_E_out after \"E.add((v1, v2))\"\n _maint__m_E_out_add((v1, v2))\n # End maint _m_E_out after \"E.add((v1, v2))\"\n # Begin maint Comp1 after \"E.add((v1, v2))\"\n _maint_Comp1_E_add((v1, v2))\n # End maint Comp1 after \"E.add((v1, v2))\"\np = 1\nprint(sorted((_m_Comp1_out[p] if (p in _m_Comp1_out) else set())))" }, { "alpha_fraction": 0.40146365761756897, "alphanum_fraction": 0.47569262981414795, 
"avg_line_length": 35.11320877075195, "blob_id": "fbcc0ac62ddb2f525e279854ba15fb426e961fa4", "content_id": "a77a50cfa3ec3234af82315b5186a19568be3a30", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1913, "license_type": "no_license", "max_line_length": 98, "num_lines": 53, "path": "/incoq/tests/programs/comp/inline_out.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from incoq.runtime import *\n# Comp1 := {x : (x, y) in E, f(y)}\n# Comp4 := {(x, z) : (x, y) in E, (y, z) in E}\n_m_E_in = Map()\n_m_E_out = Map()\nComp4 = RCSet()\nComp1 = RCSet()\ndef f(y):\n return True\n\nfor (v1, v2) in {(1, 2), (1, 3), (2, 3), (3, 4)}:\n # Begin maint _m_E_in after \"E.add((v1, v2))\"\n (v7_1, v7_2) = (v1, v2)\n if (v7_2 not in _m_E_in):\n _m_E_in[v7_2] = set()\n _m_E_in[v7_2].add(v7_1)\n # End maint _m_E_in after \"E.add((v1, v2))\"\n # Begin maint _m_E_out after \"E.add((v1, v2))\"\n (v5_1, v5_2) = (v1, v2)\n if (v5_1 not in _m_E_out):\n _m_E_out[v5_1] = set()\n _m_E_out[v5_1].add(v5_2)\n # End maint _m_E_out after \"E.add((v1, v2))\"\n # Begin maint Comp4 after \"E.add((v1, v2))\"\n v3_DAS = set()\n # Iterate {(v3_x, v3_y, v3_z) : (v3_x, v3_y) in deltamatch(E, 'bb', _e, 1), (v3_y, v3_z) in E}\n (v3_x, v3_y) = (v1, v2)\n for v3_z in (_m_E_out[v3_y] if (v3_y in _m_E_out) else set()):\n if ((v3_x, v3_y, v3_z) not in v3_DAS):\n v3_DAS.add((v3_x, v3_y, v3_z))\n # Iterate {(v3_x, v3_y, v3_z) : (v3_x, v3_y) in E, (v3_y, v3_z) in deltamatch(E, 'bb', _e, 1)}\n (v3_y, v3_z) = (v1, v2)\n for v3_x in (_m_E_in[v3_y] if (v3_y in _m_E_in) else set()):\n if ((v3_x, v3_y, v3_z) not in v3_DAS):\n v3_DAS.add((v3_x, v3_y, v3_z))\n for (v3_x, v3_y, v3_z) in v3_DAS:\n if ((v3_x, v3_z) not in Comp4):\n Comp4.add((v3_x, v3_z))\n else:\n Comp4.incref((v3_x, v3_z))\n del v3_DAS\n # End maint Comp4 after \"E.add((v1, v2))\"\n # Begin maint Comp1 after \"E.add((v1, v2))\"\n # Iterate {(v1_x, v1_y) : (v1_x, v1_y) in deltamatch(E, 'bb', _e, 1), f(v1_y)}\n (v1_x, v1_y) = (v1, v2)\n if f(v1_y):\n if (v1_x not in Comp1):\n Comp1.add(v1_x)\n else:\n Comp1.incref(v1_x)\n # End maint Comp1 after \"E.add((v1, v2))\"\nprint(sorted(Comp1))\nprint(sorted(Comp4))" }, { "alpha_fraction": 0.572519063949585, "alphanum_fraction": 0.6106870174407959, "avg_line_length": 19.6842098236084, "blob_id": "296f7ed0df48b8a61a0d8b1340dbb9bb04f1d3d1", "content_id": "b01730fa15de71f5648e0551e4242812738f46de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 393, "license_type": "no_license", "max_line_length": 71, "num_lines": 19, "path": "/incoq/tests/programs/comp/pattern_in.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "# Disable pattern conversion (input and output both have patterns).\n# No output text file because program is not executable as Python code.\n\nfrom incoq.runtime import *\n\nOPTIONS(\n pattern_in = True,\n pattern_out = True,\n)\n\nE = Set()\nS = Set()\n\nfor v1, v2 in {(1, 1), (1, 2), (1, 3), (2, 3), (3, 4)}:\n E.add((v1, v2))\n\nS.add(1)\n\nprint(sorted({x for (x, x) in E for x in S if x in S}))\n" }, { "alpha_fraction": 0.5231316685676575, "alphanum_fraction": 0.5658363103866577, "avg_line_length": 14.61111068725586, "blob_id": "f102dbb50ee40a80fd93de6dc40a94c5e7644e45", "content_id": "ef5c38cd0cbda78d13b1c34741ed8257b0d43d9f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 281, "license_type": "no_license", 
"max_line_length": 45, "num_lines": 18, "path": "/incoq/tests/programs/auxmap/inline_in.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "# Auxmaps, maint code inlined.\n\nfrom incoq.runtime import *\n\nOPTIONS(\n maint_inline = True,\n)\n\nR = Set()\n\nfor x, y in [(1, 2), (1, 3), (2, 3), (1, 4)]:\n R.add((x, y))\n\nR.remove((1, 4))\n\nprint(sorted(R))\nprint(sorted(setmatch(R, 'bu', 1)))\nprint(sorted(setmatch(R, 'ub', 2)))\n" }, { "alpha_fraction": 0.5366466045379639, "alphanum_fraction": 0.5406626462936401, "avg_line_length": 29.646154403686523, "blob_id": "0b9633c052e6f1c780d0f9e0df47256d59945527", "content_id": "9ca68bcc36fb7623c53052f5da6956e76786a766", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1992, "license_type": "no_license", "max_line_length": 69, "num_lines": 65, "path": "/incoq/tests/invinc/demand/test_demclause.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Unit tests for demclause.py.\"\"\"\n\n\nimport unittest\n\nimport incoq.compiler.incast as L\nfrom incoq.compiler.comp import EnumClause, LookupClause, Rate\nfrom incoq.compiler.demand.demclause import *\n\n\nclass DemClauseFactory(DemClauseFactory_Mixin):\n typecheck = True\n\nclass DemClauseFactory_NoTC(DemClauseFactory):\n typecheck = False\n\n\nclass DemclauseCase(unittest.TestCase):\n \n def test(self):\n cl = DemClause(EnumClause(['x', 'y'], 'R'), 'f', ['x'])\n \n # AST round-trip.\n clast = cl.to_AST()\n exp_clast = \\\n L.Enumerator(L.tuplify(['x', 'y'], lval=True),\n L.DemQuery('f', (L.ln('x'),), L.ln('R')))\n self.assertEqual(clast, exp_clast)\n cl2 = DemClause.from_AST(exp_clast, DemClauseFactory)\n self.assertEqual(cl2, cl)\n \n # Attributes.\n self.assertEqual(cl.pat_mask, (True, True))\n self.assertEqual(cl.enumvars_tagsin, ('x',))\n self.assertEqual(cl.enumvars_tagsout, ('y',))\n \n # Rewriting.\n cl2 = cl.rewrite_subst({'x': 'z'}, DemClauseFactory)\n exp_cl = DemClause(EnumClause(['z', 'y'], 'R'), 'f', ['z'])\n self.assertEqual(cl2, exp_cl)\n \n # Fancy rewriting, uses LookupClause.\n cl2 = DemClause(LookupClause(['x', 'y'], 'R'), 'f', ['x'])\n cl2 = cl2.rewrite_subst({'x': 'z'}, DemClauseFactory)\n exp_cl = DemClause(LookupClause(['z', 'y'], 'R'), 'f', ['z'])\n self.assertEqual(cl2, exp_cl)\n \n # Rating.\n rate = cl.rate(['x'])\n self.assertEqual(rate, Rate.NORMAL)\n rate = cl.rate([])\n self.assertEqual(rate, Rate.UNRUNNABLE)\n \n # Code generation.\n code = cl.get_code(['x'], L.pc('pass'))\n exp_code = L.pc('''\n DEMQUERY(f, [x], None)\n for y in setmatch(R, 'bu', x):\n pass\n ''')\n self.assertEqual(code, exp_code)\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.42239776253700256, "alphanum_fraction": 0.47769516706466675, "avg_line_length": 33.17460250854492, "blob_id": "706a116e1887e94be3507f8f3f9509f6445c0fcf", "content_id": "f0e0fe47724d0aac61cd49747558837f3024658d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2152, "license_type": "no_license", "max_line_length": 153, "num_lines": 63, "path": "/incoq/tests/programs/comp/tup/flatten_out.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from incoq.runtime import *\n# Comp1 := {z : (x, _, z) in R, x in S}\n_m_R_bwu = Map()\ndef _maint__m_R_bwu_add(_e):\n (v8_1, v8_2, v8_3) = _e\n if (v8_1 not in _m_R_bwu):\n _m_R_bwu[v8_1] = RCSet()\n if (v8_3 not in _m_R_bwu[v8_1]):\n _m_R_bwu[v8_1].add(v8_3)\n else:\n 
_m_R_bwu[v8_1].incref(v8_3)\n\n_m_R_bwb = Map()\ndef _maint__m_R_bwb_add(_e):\n (v6_1, v6_2, v6_3) = _e\n if ((v6_1, v6_3) not in _m_R_bwb):\n _m_R_bwb[(v6_1, v6_3)] = RCSet()\n if (() not in _m_R_bwb[(v6_1, v6_3)]):\n _m_R_bwb[(v6_1, v6_3)].add(())\n else:\n _m_R_bwb[(v6_1, v6_3)].incref(())\n\nComp1 = RCSet()\ndef _maint_Comp1_R_add(_e):\n # Iterate {(v2_x, v2_z) : (v2_x, _, v2_z) in deltamatch(R, 'bwb', _e, 1), v2_x in S}\n for (v2_x, v2_z) in setmatch(({_e} if ((_m_R_bwb[(_e[0], _e[2])] if ((_e[0], _e[2]) in _m_R_bwb) else RCSet()).getref(()) == 1) else {}), 'uwu', ()):\n if (v2_x in S):\n if (v2_z not in Comp1):\n Comp1.add(v2_z)\n else:\n Comp1.incref(v2_z)\n\ndef _maint_Comp1_S_add(_e):\n # Iterate {(v4_x, v4_z) : (v4_x, _, v4_z) in R, v4_x in deltamatch(S, 'b', _e, 1)}\n v4_x = _e\n for v4_z in (_m_R_bwu[v4_x] if (v4_x in _m_R_bwu) else RCSet()):\n if (v4_z not in Comp1):\n Comp1.add(v4_z)\n else:\n Comp1.incref(v4_z)\n\nR = Set()\nS = Set()\nfor _upelem in [(1, (2, 3)), (4, (5, 6))]:\n if (_upelem not in R):\n _ftv1 = (_upelem[0], _upelem[1][0], _upelem[1][1])\n R.add(_ftv1)\n # Begin maint _m_R_bwu after \"R.add(_ftv1)\"\n _maint__m_R_bwu_add(_ftv1)\n # End maint _m_R_bwu after \"R.add(_ftv1)\"\n # Begin maint _m_R_bwb after \"R.add(_ftv1)\"\n _maint__m_R_bwb_add(_ftv1)\n # End maint _m_R_bwb after \"R.add(_ftv1)\"\n # Begin maint Comp1 after \"R.add(_ftv1)\"\n _maint_Comp1_R_add(_ftv1)\n # End maint Comp1 after \"R.add(_ftv1)\"\nfor _upelem in [1, 4]:\n if (_upelem not in S):\n S.add(_upelem)\n # Begin maint Comp1 after \"S.add(_upelem)\"\n _maint_Comp1_S_add(_upelem)\n # End maint Comp1 after \"S.add(_upelem)\"\nprint(sorted(Comp1))" }, { "alpha_fraction": 0.3907311260700226, "alphanum_fraction": 0.4035157859325409, "avg_line_length": 33.76388931274414, "blob_id": "96c38aaf8792694df4a860d5022f8565a1f44b3b", "content_id": "6aa02722a30ff01de4c900a2e2fd726f21ad0549", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2503, "license_type": "no_license", "max_line_length": 119, "num_lines": 72, "path": "/incoq/tests/invinc/central/test_transform.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "###############################################################################\n# test_transform.py #\n# Author: Jon Brandvein #\n###############################################################################\n\n\"\"\"Unit tests for the transform module.\"\"\"\n\n\nimport unittest\n\nimport incoq.compiler.incast as L\nfrom incoq.compiler.central import CentralCase\nfrom incoq.compiler.central.transform import *\nfrom incoq.compiler.central.transform import transform_all_queries\n\n\nclass TestTransform(CentralCase):\n \n def test_transform(self):\n comp = L.pe('COMP({z for (x, y) in R for (y, z) in S}, [x], '\n '{\"impl\": \"inc\"})')\n tree = L.p('''\n R.add(1)\n print(COMP)\n ''', subst={'COMP': comp})\n \n tree = transform_all_queries(tree, self.manager)\n tree = L.elim_deadfuncs(tree, lambda n: n.startswith('_maint_'))\n \n exp_tree = L.p('''\n Comp1 = RCSet()\n def _maint_Comp1_R_add(_e):\n Comment(\"Iterate {(v1_x, v1_y, v1_z) : (v1_x, v1_y) in deltamatch(R, 'bb', _e, 1), (v1_y, v1_z) in S}\")\n (v1_x, v1_y) = _e\n for v1_z in setmatch(S, 'bu', v1_y):\n if ((v1_x, v1_z) not in Comp1):\n Comp1.add((v1_x, v1_z))\n else:\n Comp1.incref((v1_x, v1_z))\n \n with MAINT(Comp1, 'after', 'R.add(1)'):\n R.add(1)\n _maint_Comp1_R_add(1)\n print(setmatch(Comp1, 'bu', x))\n ''')\n \n self.assertEqual(tree, exp_tree)\n \n def 
test_preprocess(self):\n tree = L.p('''\n x = 1\n {y for (x, y) in R}\n COMP({y for (x, y) in R}, [y], {'impl': 'auxonly'})\n ''')\n given_opts = ({'obj_domain': False},\n {'{y for (x, y) in R}': {'impl': 'inc'}})\n tree, opman = preprocess_tree(\n self.manager, tree, given_opts)\n \n exp_tree = L.p('''\n x = 1\n COMP({y for (x, y) in R}, [x], {'impl': 'inc'})\n COMP({y for (x, y) in R}, [y], {'impl': 'auxonly'})\n ''')\n exp_nopts = {'obj_domain': False}\n \n self.assertEqual(tree, exp_tree)\n self.assertEqual(opman.nopts, exp_nopts)\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.42660248279571533, "alphanum_fraction": 0.48960840702056885, "avg_line_length": 36.785125732421875, "blob_id": "dd351b7d14d107836027eb9b94de4e46cc907281", "content_id": "118932b0dafad3a8ea8f7f19c37c5f7cb0ec60bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4571, "license_type": "no_license", "max_line_length": 116, "num_lines": 121, "path": "/incoq/tests/programs/comp/uset/uset_out.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from incoq.runtime import *\n# Comp1 := {(x, z) : x in _U_Comp1, (x, y) in E, (y, z) in E}\n_m_Comp1_out = Map()\ndef _maint__m_Comp1_out_add(_e):\n (v9_1, v9_2) = _e\n if (v9_1 not in _m_Comp1_out):\n _m_Comp1_out[v9_1] = set()\n _m_Comp1_out[v9_1].add(v9_2)\n\ndef _maint__m_Comp1_out_remove(_e):\n (v10_1, v10_2) = _e\n _m_Comp1_out[v10_1].remove(v10_2)\n if (len(_m_Comp1_out[v10_1]) == 0):\n del _m_Comp1_out[v10_1]\n\n_m_E_in = Map()\ndef _maint__m_E_in_add(_e):\n (v7_1, v7_2) = _e\n if (v7_2 not in _m_E_in):\n _m_E_in[v7_2] = set()\n _m_E_in[v7_2].add(v7_1)\n\n_m_E_out = Map()\ndef _maint__m_E_out_add(_e):\n (v5_1, v5_2) = _e\n if (v5_1 not in _m_E_out):\n _m_E_out[v5_1] = set()\n _m_E_out[v5_1].add(v5_2)\n\nComp1 = RCSet()\ndef _maint_Comp1__U_Comp1_add(_e):\n # Iterate {(v1_x, v1_y, v1_z) : v1_x in deltamatch(_U_Comp1, 'b', _e, 1), (v1_x, v1_y) in E, (v1_y, v1_z) in E}\n v1_x = _e\n for v1_y in (_m_E_out[v1_x] if (v1_x in _m_E_out) else set()):\n for v1_z in (_m_E_out[v1_y] if (v1_y in _m_E_out) else set()):\n if ((v1_x, v1_z) not in Comp1):\n Comp1.add((v1_x, v1_z))\n # Begin maint _m_Comp1_out after \"Comp1.add((v1_x, v1_z))\"\n _maint__m_Comp1_out_add((v1_x, v1_z))\n # End maint _m_Comp1_out after \"Comp1.add((v1_x, v1_z))\"\n else:\n Comp1.incref((v1_x, v1_z))\n\ndef _maint_Comp1__U_Comp1_remove(_e):\n # Iterate {(v2_x, v2_y, v2_z) : v2_x in deltamatch(_U_Comp1, 'b', _e, 1), (v2_x, v2_y) in E, (v2_y, v2_z) in E}\n v2_x = _e\n for v2_y in (_m_E_out[v2_x] if (v2_x in _m_E_out) else set()):\n for v2_z in (_m_E_out[v2_y] if (v2_y in _m_E_out) else set()):\n if (Comp1.getref((v2_x, v2_z)) == 1):\n # Begin maint _m_Comp1_out before \"Comp1.remove((v2_x, v2_z))\"\n _maint__m_Comp1_out_remove((v2_x, v2_z))\n # End maint _m_Comp1_out before \"Comp1.remove((v2_x, v2_z))\"\n Comp1.remove((v2_x, v2_z))\n else:\n Comp1.decref((v2_x, v2_z))\n\ndef _maint_Comp1_E_add(_e):\n v3_DAS = set()\n # Iterate {(v3_x, v3_y, v3_z) : v3_x in _U_Comp1, (v3_x, v3_y) in deltamatch(E, 'bb', _e, 1), (v3_y, v3_z) in E}\n (v3_x, v3_y) = _e\n if (v3_x in _U_Comp1):\n for v3_z in (_m_E_out[v3_y] if (v3_y in _m_E_out) else set()):\n if ((v3_x, v3_y, v3_z) not in v3_DAS):\n v3_DAS.add((v3_x, v3_y, v3_z))\n # Iterate {(v3_x, v3_y, v3_z) : v3_x in _U_Comp1, (v3_x, v3_y) in E, (v3_y, v3_z) in deltamatch(E, 'bb', _e, 1)}\n (v3_y, v3_z) = _e\n for v3_x in (_m_E_in[v3_y] if (v3_y in _m_E_in) else set()):\n if 
(v3_x in _U_Comp1):\n if ((v3_x, v3_y, v3_z) not in v3_DAS):\n v3_DAS.add((v3_x, v3_y, v3_z))\n for (v3_x, v3_y, v3_z) in v3_DAS:\n if ((v3_x, v3_z) not in Comp1):\n Comp1.add((v3_x, v3_z))\n # Begin maint _m_Comp1_out after \"Comp1.add((v3_x, v3_z))\"\n _maint__m_Comp1_out_add((v3_x, v3_z))\n # End maint _m_Comp1_out after \"Comp1.add((v3_x, v3_z))\"\n else:\n Comp1.incref((v3_x, v3_z))\n del v3_DAS\n\n_U_Comp1 = RCSet()\n_UEXT_Comp1 = Set()\ndef demand_Comp1(x):\n '{(x, z) : x in _U_Comp1, (x, y) in E, (y, z) in E}'\n if (x not in _U_Comp1):\n _U_Comp1.add(x)\n # Begin maint Comp1 after \"_U_Comp1.add(x)\"\n _maint_Comp1__U_Comp1_add(x)\n # End maint Comp1 after \"_U_Comp1.add(x)\"\n else:\n _U_Comp1.incref(x)\n\ndef undemand_Comp1(x):\n '{(x, z) : x in _U_Comp1, (x, y) in E, (y, z) in E}'\n if (_U_Comp1.getref(x) == 1):\n # Begin maint Comp1 before \"_U_Comp1.remove(x)\"\n _maint_Comp1__U_Comp1_remove(x)\n # End maint Comp1 before \"_U_Comp1.remove(x)\"\n _U_Comp1.remove(x)\n else:\n _U_Comp1.decref(x)\n\ndef query_Comp1(x):\n '{(x, z) : x in _U_Comp1, (x, y) in E, (y, z) in E}'\n if (x not in _UEXT_Comp1):\n _UEXT_Comp1.add(x)\n demand_Comp1(x)\n return True\n\nfor (v1, v2) in {(1, 2), (2, 3), (2, 4), (4, 5)}:\n # Begin maint _m_E_in after \"E.add((v1, v2))\"\n _maint__m_E_in_add((v1, v2))\n # End maint _m_E_in after \"E.add((v1, v2))\"\n # Begin maint _m_E_out after \"E.add((v1, v2))\"\n _maint__m_E_out_add((v1, v2))\n # End maint _m_E_out after \"E.add((v1, v2))\"\n # Begin maint Comp1 after \"E.add((v1, v2))\"\n _maint_Comp1_E_add((v1, v2))\n # End maint Comp1 after \"E.add((v1, v2))\"\nx = 1\nprint(sorted((query_Comp1(x) and (_m_Comp1_out[x] if (x in _m_Comp1_out) else set()))))" }, { "alpha_fraction": 0.5470778942108154, "alphanum_fraction": 0.5665584206581116, "avg_line_length": 18.25, "blob_id": "657801500ff36106960fe1e0481f465064a4371d", "content_id": "1010576c49461f2e7bf3af5e5229a49558521f0e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 616, "license_type": "no_license", "max_line_length": 71, "num_lines": 32, "path": "/incoq/tests/programs/objcomp/if_in.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "# Make sure If clauses get converted before pair domain transformation.\n# If this doesn't happen, the query will look ok at the end but the\n# maintenance code won't be inserted.\n\nfrom incoq.runtime import *\n\nOPTIONS(\n obj_domain = True,\n)\n\ns1 = Set()\ns2 = Set()\nt = Set()\nfor i in {1, 2, 3, 4, 5}:\n o = Obj()\n o.i = i\n if i % 2:\n s1.add(o)\n else:\n s2.add(o)\n t.add(o)\n\nQUERYOPTIONS(\n '{o.i for o in s if o in t}',\n params = ['s', 't'],\n impl = 'inc',\n uset_mode = 'none',\n)\ns = s1\nprint(sorted({o.i for o in s if o in t}))\ns = s2\nprint(sorted({o.i for o in s if o in t}))\n" }, { "alpha_fraction": 0.4095064103603363, "alphanum_fraction": 0.4271785616874695, "avg_line_length": 23.863636016845703, "blob_id": "e10d559e7b123b96c63ad98d38a9aaf9ec704bc3", "content_id": "ffb6dbefc95da46fb6735a8d755acbe5e107330f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1641, "license_type": "no_license", "max_line_length": 79, "num_lines": 66, "path": "/incoq/tests/util/collections/test_collections.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "###############################################################################\n# test_collections.py #\n# Author: Jon Brandvein 
#\n###############################################################################\n\n\"\"\"Unit tests for the collections module.\"\"\"\n\n\nimport unittest\n\nfrom incoq.util.collections import *\n\n\nclass TestRegistry(unittest.TestCase):\n \n def test_strictness(self):\n d = Registry()\n d[1] = 2\n \n with self.assertRaises(KeyError):\n d[1] = 3\n with self.assertRaises(KeyError):\n del d[2]\n with self.assertRaises(KeyError):\n d.update({1:2, 2:2})\n \n del d[1]\n\n\nclass TestSetRegistry(unittest.TestCase):\n \n class DummyRegistry(SetRegistry):\n \n def elem_key(self, elem):\n return elem[0]\n \n def test_strictness(self):\n s = self.DummyRegistry()\n \n s.add('abc')\n s.add('def')\n s.add('xyz')\n s.remove('def')\n \n with self.assertRaises(KeyError):\n s.add('xzx')\n \n s.clear()\n s.add('abc')\n s.discard('123')\n self.assertFalse('456' in s)\n \n def testFrozendict(self):\n d = frozendict({1:2, 3:4})\n with self.assertRaises(TypeError):\n d[3] = 5\n hash(d)\n \n def testFreeze(self):\n val = make_frozen([{1: 2}, {3}])\n exp_val = (frozendict({1: 2}), frozenset({3}))\n self.assertEqual(val, exp_val)\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.75, "alphanum_fraction": 0.75, "avg_line_length": 29, "blob_id": "3d6c342a89246cff546c70d6738d22d17456a8fd", "content_id": "64fbc0b9a83a2d3600a7211838da683038657f24", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 60, "license_type": "no_license", "max_line_length": 30, "num_lines": 2, "path": "/experiments/twitter/__init__.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from .gendb_wrapper import *\nfrom .run_twitter_exp import *\n" }, { "alpha_fraction": 0.43103447556495667, "alphanum_fraction": 0.5, "avg_line_length": 31.441177368164062, "blob_id": "3e2b02052937cf474270435f65ce234d47bb1800", "content_id": "74b08737d23a0bec721883f7e0f31cf1d5b11540", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1102, "license_type": "no_license", "max_line_length": 58, "num_lines": 34, "path": "/incoq/tests/programs/auxmap/inline_out.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from incoq.runtime import *\n_m_R_in = Map()\n_m_R_out = Map()\nR = Set()\nfor (x, y) in [(1, 2), (1, 3), (2, 3), (1, 4)]:\n R.add((x, y))\n # Begin maint _m_R_in after \"R.add((x, y))\"\n (v3_1, v3_2) = (x, y)\n if (v3_2 not in _m_R_in):\n _m_R_in[v3_2] = set()\n _m_R_in[v3_2].add(v3_1)\n # End maint _m_R_in after \"R.add((x, y))\"\n # Begin maint _m_R_out after \"R.add((x, y))\"\n (v1_1, v1_2) = (x, y)\n if (v1_1 not in _m_R_out):\n _m_R_out[v1_1] = set()\n _m_R_out[v1_1].add(v1_2)\n # End maint _m_R_out after \"R.add((x, y))\"\n# Begin maint _m_R_out before \"R.remove((1, 4))\"\n(v2_1, v2_2) = (1, 4)\n_m_R_out[v2_1].remove(v2_2)\nif (len(_m_R_out[v2_1]) == 0):\n del _m_R_out[v2_1]\n# End maint _m_R_out before \"R.remove((1, 4))\"\n# Begin maint _m_R_in before \"R.remove((1, 4))\"\n(v4_1, v4_2) = (1, 4)\n_m_R_in[v4_2].remove(v4_1)\nif (len(_m_R_in[v4_2]) == 0):\n del _m_R_in[v4_2]\n# End maint _m_R_in before \"R.remove((1, 4))\"\nR.remove((1, 4))\nprint(sorted(R))\nprint(sorted((_m_R_out[1] if (1 in _m_R_out) else set())))\nprint(sorted((_m_R_in[2] if (2 in _m_R_in) else set())))" }, { "alpha_fraction": 0.6395705342292786, "alphanum_fraction": 0.6395705342292786, "avg_line_length": 18.75757598876953, "blob_id": "4e1999834cf59e67c6287ee59670887d20cb4c25", "content_id": 
"6dee4637c55f5469c103f96d1dd816188eebb88f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 652, "license_type": "no_license", "max_line_length": 92, "num_lines": 33, "path": "/experiments/django/django_simp_osq.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "# Modified version of django_osq.py with the same\n# changes as were made in django_simp_in.py.\n\nfrom incoq.runtime import *\nfrom osq import query\n\ndef make_user(id):\n user = Obj()\n user.id = id\n user.groups = Set()\n return user\n\ndef make_group(active):\n group = Obj()\n group.active = active\n group.perms = Set()\n return group\n\ndef make_perm(name):\n perm = Obj()\n perm.name = name\n return perm\n\ndef add_group(u, g):\n u.groups.add(g)\n\ndef add_perm(g, p):\n g.perms.add(p)\n\ndef do_query(user):\n return query('user -> {p.name for g in user.groups for p in g.perms if g.active}', user)\n\ndo_query_nodemand = do_query\n" }, { "alpha_fraction": 0.6080197095870972, "alphanum_fraction": 0.6081145405769348, "avg_line_length": 33.815181732177734, "blob_id": "69db0efcc290b8092be628397d93e9dac0c3b9ec", "content_id": "83d371d1a39f45249037374061977cceae892594", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10549, "license_type": "no_license", "max_line_length": 75, "num_lines": 303, "path": "/incoq/compiler/obj/objcomp.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Object-set comprehension translation.\"\"\"\n\n\n__all__ = [\n 'flatten_comp',\n 'unflatten_comp',\n]\n\n\nfrom incoq.util.collections import OrderedSet\nimport incoq.compiler.incast as L\n\nfrom .pairrel import (make_mrel, is_mrel, get_menum, make_frel, get_fenum,\n make_maprel, get_mapenum, is_specialrel)\n\n\nclass RetrievalReplacer(L.NodeTransformer):\n \n \"\"\"Replace simple field and map retrieval expressions with a\n variable. A retrieval expression is simple if the object or map\n part of the expression is just a variable. Raise an error if any\n non-simple retrievals are encountered.\n \n Retrievals are processed inner-to-outer, so complex expressions\n like a.b[c.d].e can be handled, so long as they are built up using\n only variables and retrievals.\n \n The name of the replacement variable is given by the field_namer\n and map_namer functions.\n \n Two attributes, field_repls and map_repls, are made available for\n inspecting what replacements were performed. They are OrderedSets\n of triples where the first two components are the object/map and\n field/key respectively, and the third component is the replacement\n variable name. 
These attributes are cleared when process() is called\n again.\n \"\"\"\n \n def __init__(self, field_namer, map_namer):\n super().__init__()\n self.field_namer = field_namer\n self.map_namer = map_namer\n \n def process(self, tree):\n self.field_repls = OrderedSet()\n self.map_repls = OrderedSet()\n tree = super().process(tree)\n return tree\n \n def visit_Attribute(self, node):\n node = self.generic_visit(node)\n \n if not isinstance(node.value, L.Name):\n raise L.ProgramError('Non-simple field retrieval', node=node)\n obj = node.value.id\n field = node.attr\n \n new_name = self.field_namer(obj, field)\n self.field_repls.add((obj, field, new_name))\n \n return L.Name(new_name, node.ctx)\n \n def visit_Subscript(self, node):\n node = self.generic_visit(node)\n \n if not (isinstance(node.value, L.Name) and\n isinstance(node.slice, L.Index) and\n isinstance(node.slice.value, L.Name)):\n raise L.ProgramError('Non-simple map retrieval', node=node)\n map = node.value.id\n key = node.slice.value.id\n \n new_name = self.map_namer(map, key)\n self.map_repls.add((map, key, new_name))\n \n return L.Name(new_name, node.ctx)\n\nclass RetrievalExpander(L.NodeTransformer):\n \n \"\"\"Replace field and map replacement names with their corresponding\n retrieval expressions. Takes in mappings from replacement variable\n names to pairs of obj/map and field/key names. Expansion is\n recursive.\n \"\"\"\n \n def __init__(self, field_exps, map_exps):\n super().__init__()\n self.field_exps = field_exps\n self.map_exps = map_exps\n \n def visit_Name(self, node):\n if node.id in self.field_exps:\n obj, field = self.field_exps[node.id]\n new_node = L.Attribute(L.ln(obj), field, node.ctx)\n new_node = self.generic_visit(new_node)\n return new_node\n \n elif node.id in self.map_exps:\n map, key = self.map_exps[node.id]\n new_node = L.Subscript(L.ln(map), L.Index(L.ln(key)), node.ctx)\n new_node = self.generic_visit(new_node)\n return new_node\n \n else:\n return node\n\n\ndef flatten_retrievals(comp):\n \"\"\"Flatten the retrievals in a Comp node. Return a triple of the new\n Comp node, an OrderedSet of the fields seen, and a bool indicating\n whether a map was seen.\n \n Field and map clauses are introduced immediately to the left of their\n first use (or for the result expression, at the end of the clause\n list).\n \"\"\"\n # For map_namer, add a little extra fluff to reduce the likelihood\n # of us inadvertently creating ambiguous names.\n field_namer = lambda obj, field: obj + '_' + field\n map_namer = lambda map, key: 'm_' + map + '_k_' + key\n replacer = RetrievalReplacer(field_namer, map_namer)\n \n seen_fields = OrderedSet()\n seen_map = False\n seen_field_repls = OrderedSet()\n seen_map_repls = OrderedSet()\n \n def process(expr):\n \"\"\"Rewrite any retrievals in the given expression. 
Return a pair\n of the new expression, and a list of new clauses to be added\n for any retrievals not already seen.\n \"\"\"\n nonlocal seen_map\n new_expr = replacer.process(expr)\n new_field_repls = replacer.field_repls - seen_field_repls\n new_map_repls = replacer.map_repls - seen_map_repls\n new_clauses = []\n \n for repl in new_field_repls:\n obj, field, value = repl\n seen_fields.add(field)\n seen_field_repls.add(repl)\n new_cl = L.Enumerator(L.tuplify((obj, value), lval=True),\n L.ln(make_frel(field)))\n new_clauses.append(new_cl)\n \n for repl in new_map_repls:\n map, key, value = repl\n seen_map = True\n seen_map_repls.add(repl)\n new_cl = L.Enumerator(L.tuplify((map, key, value), lval=True),\n L.ln(make_maprel()))\n new_clauses.append(new_cl)\n \n return new_expr, new_clauses\n \n new_comp = L.rewrite_compclauses(comp, process)\n \n return new_comp, seen_fields, seen_map\n\ndef unflatten_retrievals(comp):\n \"\"\"Unflatten the retrievals of a Comp. Eliminate field and map\n enumerators and expand replacement variables to retrievals.\n \"\"\"\n def unwrap(node):\n assert isinstance(node, L.Name)\n return node.id\n \n # Remove field and map clauses and record replacement info.\n field_exps = {}\n map_exps = {}\n new_clauses = []\n for cl in comp.clauses:\n as_field = get_fenum(cl)\n as_map = get_mapenum(cl)\n \n if as_field is not None:\n obj, value, field = as_field\n # Since no expansions have been done yet, we know that field\n # and map clauses just have variables on the lhs.\n obj, value = unwrap(obj), unwrap(value)\n field_exps[value] = (obj, field)\n \n elif as_map is not None:\n map, key, value = as_map\n map, key, value = unwrap(map), unwrap(key), unwrap(value)\n map_exps[value] = (map, key)\n \n else:\n new_clauses.append(cl)\n \n # Apply replacements to the comprehension.\n expander = RetrievalExpander(field_exps, map_exps)\n new_comp = comp._replace(clauses=tuple(new_clauses))\n new_comp = expander.process(new_comp)\n \n return new_comp\n\n\ndef flatten_set_clause(cl, input_rels):\n \"\"\"Turn a membership clause that is not over a comprehension,\n special relation, or input relation, into a clause over the M-set.\n Return a pair of the (possibly unchanged) clause and a bool\n indicating whether or not the change was done.\n \n This also works on condition clauses that express membership\n constraints. The rewritten clause is still a condition clause.\n \"\"\"\n def should_trans(rhs):\n return (not isinstance(rhs, L.Comp) and\n not (isinstance(rhs, L.Name) and\n (is_specialrel(rhs.id) or rhs.id in input_rels)))\n \n # Enumerator case.\n if isinstance(cl, L.Enumerator) and should_trans(cl.iter):\n item = cl.target\n cont = cl.iter\n cont = L.ContextSetter.run(cont, L.Store)\n new_cl = L.Enumerator(L.tuplify((cont, item), lval=True),\n L.ln(make_mrel()))\n return new_cl, True\n \n # Condition case.\n if isinstance(cl, L.expr) and L.is_cmp(cl):\n item, op, cont = L.get_cmp(cl)\n if isinstance(op, L.In) and should_trans(cont):\n new_cl = L.cmp(L.tuplify((cont, item)),\n L.In(),\n L.ln(make_mrel()))\n return new_cl, True\n \n return cl, False\n\ndef unflatten_set_clause(cl):\n \"\"\"Opposite of above. Unflatten clauses over the M-set. Works for\n both enumerators and conditions. 
Returns the (possibly unchanged)\n clause.\n \"\"\"\n # Enumerator case.\n if isinstance(cl, L.Enumerator):\n res = get_menum(cl)\n if res is None:\n return cl\n cont, item = res\n \n cont = L.ContextSetter.run(cont, L.Load)\n new_cl = L.Enumerator(item, cont)\n return new_cl\n \n # Condition case.\n if isinstance(cl, L.expr) and L.is_cmp(cl):\n lhs, op, rhs = L.get_cmp(cl)\n if not (isinstance(op, L.In) and\n isinstance(lhs, L.Tuple) and len(lhs.elts) == 2 and\n L.is_name(rhs) and is_mrel(L.get_name(rhs))):\n return cl\n cont, item = lhs.elts\n new_cl = L.cmp(item, L.In(), cont)\n return new_cl\n \n return cl\n\n\ndef flatten_sets(comp, input_rels):\n \"\"\"Flatten the set iterations in a Comp node. Return a pair of the\n new comp and a bool indicating whether or not the M-set was used.\n (As a practical matter, this should generally be True.) Enumerators\n over pair relations, input relations, and other comprehensions are\n not affected.\n \"\"\"\n use_mset = False\n \n def process(cl):\n nonlocal use_mset\n new_cl, new_use_mset = flatten_set_clause(cl, input_rels)\n use_mset |= new_use_mset\n return new_cl, []\n \n new_comp = L.rewrite_compclauses(comp, process, resexp=False)\n \n return new_comp, use_mset\n\ndef unflatten_sets(comp):\n \"\"\"Unflatten _M enumerators.\"\"\"\n new_clauses = tuple(unflatten_set_clause(cl) for cl in comp.clauses)\n return comp._replace(clauses=new_clauses)\n\n\ndef flatten_comp(comp, input_rels):\n \"\"\"Flatten away objects and nested sets. Return a tuple of the new\n comp, a boolean indicating whether the M-set is used (in practice,\n always True), an OrderedSet of the fields replaced, and a boolean\n indicating whether a map retrieval is used.\n \"\"\"\n comp, fields, use_map = flatten_retrievals(comp)\n comp, use_mset = flatten_sets(comp, input_rels)\n return comp, use_mset, fields, use_map\n\ndef unflatten_comp(comp):\n \"\"\"Unflatten a relational comprehension back to the object domain.\"\"\"\n comp = unflatten_sets(comp)\n comp = unflatten_retrievals(comp)\n return comp\n" }, { "alpha_fraction": 0.6851851940155029, "alphanum_fraction": 0.7037037014961243, "avg_line_length": 17, "blob_id": "8988100a94d1cb19c9dd55c5d38fb54eff77ac0e", "content_id": "3c280839e1138b2a82c74c55503329e41cc777c4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 54, "license_type": "no_license", "max_line_length": 28, "num_lines": 3, "path": "/experiments/jql/jql_2_orig.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from .jql_2_in import *\n\ndo_query_nodemand = do_query\n" }, { "alpha_fraction": 0.5081967115402222, "alphanum_fraction": 0.5437158346176147, "avg_line_length": 17.299999237060547, "blob_id": "679cf643a973638a76315548a4dad1ecf350682a", "content_id": "edd9660ad9e2614ae2e430b4cdd412231a6827b8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 366, "license_type": "no_license", "max_line_length": 65, "num_lines": 20, "path": "/incoq/tests/programs/comp/patternmaint_in.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "# Comprehension whose maintenance requires auxmaps for equalities\n# and wildcards.\n\nfrom incoq.runtime import *\n\nQUERYOPTIONS(\n '{x for (x, x2, y, z) in P if y in S if x == x2}',\n impl = 'inc',\n)\n\nP = Set()\nS = Set()\n\nfor v in {(1, 1, 2, 3), (1, 2, 2, 4)}:\n P.add(v)\n\nS.add(2)\n\nprint(sorted({x for (x, x2, y, z) in P if y in S\n if x == x2}))\n" }, { "alpha_fraction": 
0.6258644461631775, "alphanum_fraction": 0.6261410713195801, "avg_line_length": 33.75961685180664, "blob_id": "6e7d9bd3687f98ad4204797affd5f10c5c9503de", "content_id": "c3fb54984ef3e14ab35b738a0408471b1355cf2b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7230, "license_type": "no_license", "max_line_length": 79, "num_lines": 208, "path": "/incoq/tests/programs/test_transformation.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "###############################################################################\n# test_transformation.py #\n# Author: Jon Brandvein #\n###############################################################################\n\n\"\"\"Test the overall transformation. Both the generated code and its\noutput are checked.\n\nThis module recursively scans this directory, finding and running\nall test cases that match the black/whitelist.\n\nA transformation test case FOO consists of a file FOO_in.py and\nFOO_out.py. It transforms FOO_in.py and confirms that the resulting\nprogram exactly matches FOO_out.py. This kind of test is thus sensitive\nto non-determinism in the transformation, whether it's the order of\ncode insertion or the choice of fresh variable names. We make it a point\nto avoid this kind of non-determinism throughout the system.\n\nA behavior test consists of a file FOO_in.py and FOO_out.txt. It first\ntransforms FOO_in.py, then it runs both the original and transformed\nprogram and confirms that the result is exactly the text specified in\nFOO_out.txt. This test is sensitive to non-determinism in the program\nsemantics. Therefore, the test programs will sort sets before printing\nthem.\n\"\"\"\n\n# NOTE: If the bintrees library is installed without support for FastAVLTree,\n# behavior tests will stupidly fail due to extra stdout warning-pollution by\n# the bintrees library.\n\n\nimport unittest\nimport os\n\nfrom incoq.util.pyexec import pyexec_source\nfrom incoq.compiler.central import transform_source\n\n\n# Doesn't do anything at the moment.\nVERBOSE = False\n#VERBOSE = True\n\ntest_directory = os.path.split(__file__)[0]\nMAXDIFF = None\n\ndef check_basepath(base_path):\n \"\"\"Return True if this test is whitelisted or not blacklisted;\n False otherwise. (Whitelist takes priority.)\n \"\"\"\n from fnmatch import fnmatch\n whitelist = [\n ]\n blacklist = [\n# '*',\n# 'auxmap/*',\n# 'comp/*',\n# 'objcomp/*',\n# 'deminc/*',\n# 'aggr/*',\n ]\n \n # TODO: This include/exclude path logic could be refactored into\n # util to be shared with tools/linecount. 
But the proper way to\n # structure it would be to move the util library outside the incoq\n # package and into the top level.\n # TODO: Use globs instead of fnmatch?\n whitelist = [os.path.normpath(p) for p in whitelist]\n blacklist = [os.path.normpath(p) for p in blacklist]\n rel_path = os.path.relpath(base_path, start=test_directory)\n return (any(fnmatch(rel_path, item) for item in whitelist) or\n all(not fnmatch(rel_path, item) for item in blacklist))\n\n\ndef make_transform_test(base_name, in_name, outpy_name, outtxt_name):\n \n def template(self):\n with open(in_name, 'r') as in_file, \\\n open(outpy_name, 'r') as outpy_file:\n in_source = in_file.read()\n exp_source = outpy_file.read()\n \n result_source, _manager = transform_source(in_source)\n \n self.assertEqual(result_source, exp_source)\n \n base_relpath = os.path.relpath(base_name)\n template.__name__ = 'test_transform_' + base_relpath\n return template\n\n\ndef make_behavior_test(base_name, in_name, outpy_name, outtxt_name):\n \n def template(self):\n with open(in_name, 'r') as in_file, \\\n open(outtxt_name, 'r') as outtxt_file:\n in_source = in_file.read()\n exp_txt = outtxt_file.read()\n \n result_source, _manager = transform_source(in_source)\n \n in_txt = pyexec_source(in_source)\n result_txt = pyexec_source(result_source)\n \n self.assertEqual(in_txt, exp_txt)\n self.assertEqual(result_txt, exp_txt)\n \n base_relpath = os.path.relpath(base_name)\n template.__name__ = 'test_behavior_' + base_relpath\n return template\n\n\ndef get_test_entries(dirfiles):\n \"\"\"Given a list of pairs of directories and files, return a set of\n tuples (dir, base_name, in_name, outpy_name, outtxt_name)\n representing a group of test files. base_name is the common prefix\n to the three other files. An entry is returned iff the in_name file\n actually exists. If the other two files do not exist, None is\n substituted.\n \"\"\"\n test_entries = set()\n for dir, filenames in dirfiles:\n for name in filenames:\n if name.endswith('_in.py'):\n base_name = name[:-len('_in.py')]\n in_name = name\n outpy_name = base_name + '_out.py'\n outtxt_name = base_name + '_out.txt'\n if outpy_name not in filenames:\n outpy_name = None\n if outtxt_name not in filenames:\n outtxt_name = None\n \n test_entries.add((dir, base_name, in_name,\n outpy_name, outtxt_name))\n \n return test_entries\n\n\ndef get_tests():\n \"\"\"Find runnable tests by searching this directory for input files.\n Return a pair of a list of transformation tests and a list of\n behavior tests.\n \"\"\"\n \n # Walk the directory to find all files.\n walk_entries = list(os.walk(test_directory))\n dirfiles = [(dirpath, filenames) for dirpath, _, filenames in walk_entries]\n \n # Group files by test.\n test_entries = get_test_entries(dirfiles)\n \n # Create test functions. Omit the transformation test if there's no\n # outpy file. 
Omit the behavior test if there's no\n # outtxt file.\n \n transform_tests = []\n behavior_tests = []\n \n for (dir, *names) in test_entries:\n base_name, in_name, outpy_name, outtxt_name = names\n base_fullpath = os.path.join(dir, base_name)\n base_path = os.path.relpath(base_fullpath)\n \n if not check_basepath(base_path):\n continue\n \n args = [os.path.join(dir, name) if name is not None else None\n for name in names]\n \n if outpy_name is not None:\n test = make_transform_test(*args)\n transform_tests.append(test)\n \n if outtxt_name is not None:\n test = make_behavior_test(*args)\n behavior_tests.append(test)\n \n return transform_tests, behavior_tests\n\n\nclass TestTransform(unittest.TestCase):\n maxDiff = MAXDIFF\n\nclass TestBehavior(unittest.TestCase):\n maxDiff = MAXDIFF\n\ntransform_tests, behavior_tests = get_tests()\nfor func in transform_tests:\n assert getattr(TestTransform, func.__name__, None) is None\n setattr(TestTransform, func.__name__, func)\nfor func in behavior_tests:\n assert getattr(TestBehavior, func.__name__, None) is None\n setattr(TestBehavior, func.__name__, func)\n\n\n# Run transform tests first, for convenience.\ndef load_tests(loader, tests, pattern):\n suite = unittest.TestSuite()\n transform_tests = loader.loadTestsFromTestCase(TestTransform)\n behavior_tests = loader.loadTestsFromTestCase(TestBehavior)\n suite.addTests(transform_tests)\n suite.addTests(behavior_tests)\n return suite\n\n\nif __name__ == '__main__':\n import sys\n runner = unittest.TextTestRunner(stream=sys.stdout, verbosity=2)\n unittest.main(testRunner=runner)\n" }, { "alpha_fraction": 0.5210029482841492, "alphanum_fraction": 0.5337023735046387, "avg_line_length": 27.83568000793457, "blob_id": "83bde1f564a47ff28b4d686a4e391bc4e519a1e4", "content_id": "4c102a1fe0dde8567ce6024fc53f158093b073b1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6142, "license_type": "no_license", "max_line_length": 70, "num_lines": 213, "path": "/experiments/util/__init__.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Experiment utilities.\"\"\"\n\n__all__ = [\n    'lprof_totaltime',\n]\n\n\nimport re\nfrom functools import partial\n\nfrom frexp.extractor import Extractor\n\nimport incoq.runtime\nimport osq\n\n\ndef lprof_totaltime(report):\n \"\"\"Given a (possibly partial) string output of line_profiler,\n return the total time.\n \"\"\"\n pat = r'^\\s*(\\d+)\\s+(\\d+)\\s+(\\d+).*'\n lines = report.split('\\n')\n total_time = 0\n for line in lines:\n m = re.match(pat, line)\n if m is not None:\n total_time += int(m.group(3))\n return total_time\n\n\nclass SmallExtractor(Extractor):\n \n \"\"\"Small format, for paper figures.\"\"\"\n \n rcparams = {\n 'font.size': 11,\n 'legend.fontsize': 10,\n 'legend.numpoints': 1,\n 'legend.labelspacing': .2,\n 'axes.titlesize': 11,\n 'axes.linewidth': .5,\n 'lines.linewidth': .5,\n 'lines.markersize': 3,\n \n 'font.family': 'serif',\n 'font.serif': ['Computer Modern'],\n 'text.usetex': True,\n \n 'legend.frameon': False,\n }\n \n timesop = '$\\\\times$'\n \n ylabelpad = 2\n xlabelpad = 2\n \n dpi = 200\n figsize = (3.5, 2.79)\n\nclass LargeExtractor(Extractor):\n \n \"\"\"Large format, for clear, standalone images.\"\"\"\n \n rcparams = {\n 'font.size': 24,\n 'legend.fontsize': 24,\n 'lines.linewidth': 2,\n 'lines.markersize': 5,\n \n 'xtick.major.size': 10,\n 'ytick.major.size': 10,\n 'xtick.minor.size': 8,\n 'ytick.minor.size': 8,\n 'xtick.major.width': 2,\n 'ytick.major.width': 2,\n 
'xtick.minor.width': 1,\n 'ytick.minor.width': 1,\n \n 'legend.frameon': False,\n }\n \n figsize = (10, 7.5)\n\nclass PosterExtractor(Extractor):\n \n \"\"\"Format for 8x6 figure in poster.\"\"\"\n \n rcparams = {\n 'font.size': 32,\n 'legend.fontsize': 24,\n 'legend.numpoints': 1,\n 'lines.linewidth': 3,\n 'lines.markersize': 8,\n \n 'xtick.major.size': 10,\n 'ytick.major.size': 10,\n 'xtick.minor.size': 8,\n 'ytick.minor.size': 8,\n 'xtick.major.width': 2,\n 'ytick.major.width': 2,\n 'xtick.minor.width': 1,\n 'ytick.minor.width': 1,\n \n 'legend.frameon': False,\n }\n \n figsize = (8, 6)\n \n tightlayout_pad = .1\n\ndef djb2(s):\n \"\"\"Simple string hash algorithm.\"\"\"\n val = 5381\n mod = 2**32\n for c in s:\n # val*33 + ord(c); parenthesize the shift so it isn't\n # swallowed by the higher-precedence addition.\n val = ((val << 5) + val + ord(c)) % mod\n return val\n\ndef canonize(tree, *, use_hash=False):\n \"\"\"Recursively convert a tree of values into a canonical form.\n Values of specific types are replaced as follows:\n \n - Primitive types (None, int, float, and string) are\n left alone.\n \n - Lists and tuples are turned into tuples.\n \n - Sets and frozensets are turned into frozensets.\n \n - Dictionaries become frozensets of (key, value) tuples.\n \n - The keys must be strings.\n \n - As a special case, dictionaries whose keys all begin with\n underscores are turned into frozensets of their values.\n This includes empty dictionaries.\n \n - Sets and RCSets from incoq.runtime get turned into frozensets.\n \n - Objs from incoq.runtime get turned into a pair of their class\n name and a frozenset of their __dict__, excluding keys that\n begin with one or more underscores.\n \n - RCSets from osq get replaced by frozensets.\n \n Aliasing is not preserved. Canonizing also has the effect of\n deep-copying.\n \n The purpose of this function is to create a nearly semantically\n equivalent value that can be compared for equality with other\n values. 
This is needed because the transformed program uses\n helper types that are similar to, but not identical to, their\n corresponding basic Python types.\n \n If hash is True, the returned values are deterministically\n hashed (not necessarily using __hash__, which is randomized\n for some types).\n \"\"\"\n can = partial(canonize, use_hash=use_hash)\n hash_kind = 'NORMAL'\n \n if isinstance(tree, (type(None), int, float, str, bool)):\n result = tree\n if isinstance(tree, str):\n hash_kind = 'STRING'\n \n elif isinstance(tree, (incoq.runtime.Set, incoq.runtime.RCSet,\n osq.incr.RCSet)):\n result = frozenset(can(v) for v in tree)\n \n elif isinstance(tree, incoq.runtime.Obj):\n name = tree.__class__.__name__\n attrs = frozenset((k, can(v))\n for k, v in tree.__dict__.items()\n if not k.startswith('_'))\n hash_kind = 'OBJ'\n result = (name, attrs)\n \n elif isinstance(tree, (list, tuple)):\n result = tuple(can(v) for v in tree)\n \n elif isinstance(tree, (set, frozenset)):\n result = frozenset(can(v) for v in tree)\n \n elif isinstance(tree, dict):\n if all(k.startswith('_') for k in tree.keys()):\n result = frozenset(can(v) for v in tree.values())\n else:\n hash_kind = 'DICT'\n result = frozenset((k, can(v))\n for k, v in tree.items())\n \n else:\n raise ValueError('Un-canonizable type: ' + str(type(tree)))\n \n assert type(result) in [type(None), int, float, str, bool,\n tuple, frozenset]\n \n if use_hash:\n if hash_kind == 'STRING':\n result = djb2(result)\n elif hash_kind == 'OBJ':\n name, attrs = result\n name = djb2(name)\n attrs = frozenset((djb2(k), v) for k, v in attrs)\n result = (name, hash(attrs))\n result = hash(result)\n elif hash_kind == 'DICT':\n result = frozenset((djb2(k), v) for k, v in result)\n result = hash(result)\n else:\n result = hash(result)\n \n return result\n" }, { "alpha_fraction": 0.4131607413291931, "alphanum_fraction": 0.41531822085380554, "avg_line_length": 29.393442153930664, "blob_id": "907dc974277c20547b2fa0e5bb632bf4830cf30d", "content_id": "3867c55c58bf610aa8dd8d192a72ad986946d9e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1854, "license_type": "no_license", "max_line_length": 79, "num_lines": 61, "path": "/incoq/tests/util/collections/test_partitioning.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "###############################################################################\n# test_partitioning.py                                                        #\n# Author: Jon Brandvein                                                       #\n###############################################################################\n\n\"\"\"Unit tests for the partitioning module.\"\"\"\n\n\nimport unittest\n\nfrom incoq.util.collections.partitioning import *\n\nfs = frozenset\n\n\nclass TestPartitioning(unittest.TestCase):\n \n def test_disjoint(self):\n with self.assertRaises(AssertionError):\n Partitioning([['x', 'y'], ['x', 'z']])\n \n def test_equate(self):\n p = Partitioning()\n p.equate('a', 'a')\n p.equate('a', 'b')\n p.equate('c', 'd')\n p.equate('e', 'f')\n p.equate('f', 'g')\n p.equate('b', 'e')\n \n parts = p.to_sets()\n exp_parts = {fs(['a', 'b', 'e', 'f', 'g']),\n fs(['c', 'd'])}\n self.assertCountEqual(parts, exp_parts)\n p._check_disjointness()\n \n def test_queries(self):\n p = Partitioning([['a', 'b'], ['c']])\n \n exp_elems = ['a', 'b', 'c']\n exp_subst = {'a': 'a', 'b': 'a', 'c': 'c'}\n \n self.assertEqual(p.find('d'), 'd')\n self.assertEqual(p.elements(), exp_elems)\n self.assertEqual(p.to_subst(), exp_subst)\n \n def test_equivs(self):\n p = Partitioning.from_equivs([['a', 'b'], ['b', 'c'], ['e', 'f']])\n parts = p.to_sets()\n exp_parts = {fs(['a', 'b', 'c']), fs(['e', 'f'])}\n self.assertCountEqual(parts, exp_parts)\n \n def test_singleassignmentdict(self):\n d = SingleAssignmentDict({'a': 1, 'b': 2})\n d['c'] = 3\n with self.assertRaises(AssertionError):\n d['a'] = 4\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.5652837157249451, "alphanum_fraction": 0.5663986802101135, "avg_line_length": 37.38063049316406, "blob_id": "bd43c7f2fb96fde2932fb6fa78fc14adc122e902", "content_id": "760b355b21002ee29c2dbef9f9013cb85e85883c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17041, "license_type": "no_license", "max_line_length": 76, "num_lines": 444, "path": "/incoq/compiler/comp/join.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Joins, which are the basis for comprehensions.\"\"\"\n\n\n__all__ = [\n    'DeltaInfo',\n    'Join',\n]\n\n\nfrom itertools import chain\nfrom collections import defaultdict\n\nfrom simplestruct import Struct, Field, TypedField\n\nfrom incoq.util.type import checktype\nfrom incoq.util.seq import elim_duplicates, pairs\nfrom incoq.util.collections import Partitioning\nimport incoq.compiler.incast as L\nfrom incoq.compiler.set import Mask\n\nfrom .clause import Clause\nfrom .order import AsymptoticOrderer\n\n\nclass DeltaInfo(Struct):\n \n \"\"\"Information about maintenance joins.\"\"\"\n \n rel = TypedField(str)\n \"\"\"Delta relation.\"\"\"\n elem = TypedField(L.AST)\n \"\"\"Delta element expression AST.\"\"\"\n lhs = TypedField(str, seq=True)\n \"\"\"Delta clause LHS identifier list.\"\"\"\n op = TypedField(str)\n \"\"\"'add' or 'remove'.\"\"\"\n \n @classmethod\n def from_options(cls, options):\n \"\"\"Construct from comprehension options dict.\n If delta info isn't provided, return None instead\n of an instance.\n \"\"\"\n if options is None or '_deltarel' not in options:\n return None\n \n rel = options['_deltarel']\n elem = options['_deltaelem']\n elem = L.pe(elem)\n lhs = options['_deltalhs']\n lhs = L.get_vartuple(L.pe(lhs))\n op = options['_deltaop']\n return cls(rel, elem, lhs, op)\n \n def __init__(self, rel, elem, lhs, op):\n assert op in ['add', 'remove']\n \n def updateopts(self, options):\n \"\"\"Return a modified options dict with the delta keys set.\"\"\"\n options = dict(options)\n options['_deltarel'] = self.rel\n options['_deltaelem'] = L.ts(self.elem)\n options['_deltalhs'] = L.ts(L.tuplify(self.lhs, lval=True))\n options['_deltaop'] = self.op\n return options\n\n\nclass Join(Struct):\n \n \"\"\"A join of one or more enumerator clauses and zero or more\n condition clauses. 
Partitioning.from_equivs([['a', 'b'], ['b', 'c'], ['e', 'f']])\n parts = p.to_sets()\n exp_parts = {fs(['a', 'b', 'c']), fs(['e', 'f'])}\n self.assertCountEqual(parts, exp_parts)\n \n def test_singleassignmentdict(self):\n d = SingleAssignmentDict({'a': 1, 'b': 2})\n d['c'] = 3\n with self.assertRaises(AssertionError):\n d['a'] = 4\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.5652837157249451, "alphanum_fraction": 0.5663986802101135, "avg_line_length": 37.38063049316406, "blob_id": "bd43c7f2fb96fde2932fb6fa78fc14adc122e902", "content_id": "760b355b21002ee29c2dbef9f9013cb85e85883c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17041, "license_type": "no_license", "max_line_length": 76, "num_lines": 444, "path": "/incoq/compiler/comp/join.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Joins, which are the basis for comprehensions.\"\"\"\n\n\n__all__ = [\n 'DeltaInfo',\n 'Join',\n]\n\n\nfrom itertools import chain\nfrom collections import defaultdict\n\nfrom simplestruct import Struct, Field, TypedField\n\nfrom incoq.util.type import checktype\nfrom incoq.util.seq import elim_duplicates, pairs\nfrom incoq.util.collections import Partitioning\nimport incoq.compiler.incast as L\nfrom incoq.compiler.set import Mask\n\nfrom .clause import Clause\nfrom .order import AsymptoticOrderer\n\n\nclass DeltaInfo(Struct):\n \n \"\"\"Information about maintenance joins.\"\"\"\n \n rel = TypedField(str)\n \"\"\"Delta relation.\"\"\"\n elem = TypedField(L.AST)\n \"\"\"Delta element expression AST.\"\"\"\n lhs = TypedField(str, seq=True)\n \"\"\"Delta clause LHS identifier list.\"\"\"\n op = TypedField(str)\n \"\"\"'add' or 'remove'.\"\"\"\n \n @classmethod\n def from_options(cls, options):\n \"\"\"Construct from comprehension options dict.\n If delta info isn't provided, return None instead\n of an instance.\n \"\"\"\n if options is None or '_deltarel' not in options:\n return None\n \n rel = options['_deltarel']\n elem = options['_deltaelem']\n elem = L.pe(elem)\n lhs = options['_deltalhs']\n lhs = L.get_vartuple(L.pe(lhs))\n op = options['_deltaop']\n return cls(rel, elem, lhs, op)\n \n def __init__(self, rel, elem, lhs, op):\n assert op in ['add', 'remove']\n \n def updateopts(self, options):\n \"\"\"Return a modified options dict with the delta keys set.\"\"\"\n options = dict(options)\n options['_deltarel'] = self.rel\n options['_deltaelem'] = L.ts(self.elem)\n options['_deltalhs'] = L.ts(L.tuplify(self.lhs, lval=True))\n options['_deltaop'] = self.op\n return options\n\n\nclass Join(Struct):\n \n \"\"\"A join of one or more enumerator clauses and zero or more\n condition clauses. 
No definite order, cost, or code.\n \n Note that joins do not project out variables, unlike\n comprehensions.\n \"\"\"\n \n clauses = TypedField(Clause, seq=True)\n \"\"\"Sequence of clauses.\"\"\"\n factory = Field()\n \"\"\"Clause factory to use.\"\"\"\n delta = Field()\n \"\"\"DeltaInfo or None.\"\"\"\n \n @classmethod\n def from_comp(cls, node, factory):\n \"\"\"Construct from Comp node, ignoring the result expression.\n factory is used to construct clauses from their ASTs.\n \"\"\"\n checktype(node, L.Comp)\n clauses = [factory.from_AST(clast) for clast in node.clauses]\n delta = DeltaInfo.from_options(node.options)\n return cls(clauses, factory, delta)\n \n def __init__(self, clauses, factory, delta):\n assert len(clauses) > 0\n \n # Derived data.\n \n self.enumvars = elim_duplicates(tuple(chain.from_iterable(\n cl.enumvars for cl in self.clauses)))\n \"\"\"Tuple of all enumeration variables used by the clauses.\"\"\"\n \n self.vars = elim_duplicates(tuple(chain.from_iterable(\n cl.vars for cl in self.clauses)))\n \"\"\"Tuple of all variables used by the clauses.\"\"\"\n \n self.rels = elim_duplicates(tuple( \\\n cl.enumrel for cl in self.clauses\n if cl.enumrel is not None))\n \"\"\"Tuple of all sets iterated by the clauses.\"\"\"\n \n self.robust = all(cl.robust for cl in self.clauses)\n \"\"\"True if this join is meaningful when moved to a different\n program point. Required for incrementalization.\n \"\"\"\n self.inc_safe = all(cl.inc_safe for cl in self.clauses)\n \"\"\"True if this join is safe to incrementalize, aside from\n issues of robustness.\n \"\"\"\n \n self.has_wildcards = any(cl.has_wildcards for cl in self.clauses)\n \n self.has_demand = any(cl.has_demand for cl in self.clauses)\n \n def __str__(self):\n return ', '.join(str(cl) for cl in self.clauses)\n \n def to_comp(self, options):\n \"\"\"Create a corresponding Comp node.\"\"\"\n clauses = tuple(cl.to_AST() for cl in self.clauses)\n if self.delta is not None:\n options = self.delta.updateopts(options)\n return L.Comp(L.tuplify(self.enumvars),\n clauses, (), options)\n \n def rewrite_subst(self, subst):\n \"\"\"Return a modified Join that renames according to the given\n substitution mapping.\n \"\"\"\n new_clauses = [self.factory.rewrite_subst(cl, subst)\n for cl in self.clauses]\n return self._replace(clauses=new_clauses)\n \n def prefix_enumvars(self, prefix):\n \"\"\"Return a modified Join that has the given prefix on each\n enumeration variable.\n \"\"\"\n subst = {var: prefix + var for var in self.enumvars}\n return self.rewrite_subst(subst)\n \n def elim_equalities(self, keepvars):\n \"\"\"Produce a semantically equivalent join that eliminates\n equality conditions by unifying the equated variables.\n For instance, a condition \"x == y\" will be eliminated, and\n all occurrences of y in the remaining clauses will be replaced\n by uses of x.\n \n Variables that are listed in sequence keepvars will not be\n eliminated. If y is in keepvars and x is not, then uses of\n x will be renamed to y, not vice versa. 
If both x and y are\n in keepvars, the condition will be skipped and the variables\n not unified.\n \n Condition clauses that express a relational membership\n constraint are turned into enumerators.\n \n The new join and the variable substitution are returned.\n \n This operation is idempotent.\n \"\"\"\n part = Partitioning()\n new_clauses = []\n for cl in self.clauses:\n # Handle converting membership conditions to enumerators.\n if cl.kind is Clause.KIND_COND:\n cl = self.factory.membercond_to_enum(cl)\n \n # Skip if not an equation.\n if cl.eqvars is None:\n new_clauses.append(cl)\n continue\n lhs, rhs = cl.eqvars\n \n # Skip if both are keepvars.\n if lhs in keepvars and rhs in keepvars:\n new_clauses.append(cl)\n continue\n \n # Flip if necessary, to ensure keepvars are preserved.\n if rhs in keepvars and lhs not in keepvars:\n lhs, rhs = rhs, lhs\n \n part.equate(lhs, rhs)\n \n subst = part.to_subst()\n new_join = self._replace(clauses=new_clauses)\n new_join = new_join.rewrite_subst(subst)\n return new_join, subst\n \n def make_wildcards(self, keepvars):\n \"\"\"Produce a semantically equivalent join in which enumeration\n variables that occur only once are replaced by wildcards. This\n excludes cases where the variable occurs twice in the same\n clause, or where it occurs in condition clauses.\n \n Variables in keepvars are also excluded from this processing.\n \n This operation is idempotent.\n \"\"\"\n single_use = []\n for i, cl in enumerate(self.clauses):\n other_clauses = self.clauses[:i] + self.clauses[i+1:]\n for var in cl.enumvars:\n # Protect keepvars.\n if var in keepvars:\n continue\n # Check for multiple occurrences in this clause.\n if cl.enumlhs.count(var) > 1:\n continue\n # Check for any occurrence in other clauses.\n if any(var in cl2.vars for cl2 in other_clauses):\n continue\n \n single_use.append(var)\n \n subst = {var: '_' for var in single_use}\n return self.rewrite_subst(subst)\n \n def make_equalities(self, boundvars):\n \"\"\"Opposite of elim_equalities(). Produce a semantically\n equivalent join in which no enumeration variable appears more\n than once among the left-hand sides of all enumerators (whether\n in the same clause or different clauses). Multiple occurrences\n of the same variable get renamed to fresh variables, with new\n condition clauses added to equate the fresh variables to the\n first variable.\n \n Variables that appear in boundvars are considered to have\n occurred once already outside the join.\n \n Enumerators in which all enumeration variables have been\n replaced in this manner get turned into condition clauses.\n \n Some occurrences inside enumerators are not replaced,\n depending on the clause's pat_mask field.\n \"\"\"\n # Map from each enum var to a counter, used to make fresh\n # identifiers for its occurrences. \n repl_counts = defaultdict(lambda: 0)\n # Map from each enum var to a list of the names that will\n # be used to replace its occurrences, in order. Occurrences\n # that should not be replaced map to themselves in the list.\n # If the var is in boundvars, there is one extra occurrence\n # at the front of the list. 
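(For example, a variable x with three renamed occurrences maps to ['x', 'x_2', 'x_3'].) 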
Occurrences inside condition\n # clauses are not accounted for in the list.\n repl_map = defaultdict(lambda: [])\n \n def add_repl_occ(v):\n \"\"\"Process an occurrence that is subject to renaming.\"\"\"\n # Ignore wildcards.\n if v == '_':\n return\n # First occurrence is not renamed, later occurrences are.\n repl_counts[v] += 1\n occ_name = (v if repl_counts[v] == 1\n else v + '_' + str(repl_counts[v]))\n repl_map[v].append(occ_name)\n \n def add_skip_occ(v):\n \"\"\"Process an occurrence that is left unchanged.\"\"\"\n # Wildcards should not occur.\n # (Not sure about this, could make it just skip, like above.)\n assert v != '_'\n repl_map[v].append(v)\n \n for v in boundvars:\n add_repl_occ(v)\n \n # Process occurrences in the clauses. In addition,\n # all-bound enumerators get turned into condition clauses.\n new_clauses = []\n for cl in self.clauses:\n # Skip conditions.\n if cl.kind is Clause.KIND_COND:\n new_clauses.append(cl)\n continue\n \n # All-bound enum becomes a condition.\n if set(cl.enumlhs).issubset(repl_counts):\n cl = self.factory.enum_to_membercond(cl)\n new_clauses.append(cl)\n continue\n \n # Normal case.\n for v, p in zip(cl.enumlhs, cl.pat_mask):\n if p:\n add_repl_occ(v)\n else:\n add_skip_occ(v)\n new_clauses.append(cl)\n \n # Create new condition clauses to equate the new variables.\n new_conds = []\n for v in self.enumvars:\n repl_list = repl_map[v]\n for v1, v2 in pairs(repl_list):\n # No equality needed for identity replacements.\n if v == v2:\n continue\n condcl = self.factory.from_AST(L.cmpeq(L.ln(v1), L.ln(v2)))\n new_conds.append(condcl)\n \n # Define a substitution that calls a function to consume\n # the next identifier from the appropriate list.\n # For boundvars, start replacing at the second slot onward.\n \n for v in repl_map:\n if v in boundvars:\n repl_map[v].pop(0)\n \n def var_renamer(v):\n return repl_map[v].pop(0)\n \n # Use rewrite_lhs(), not rewrite_subst().\n # We don't want to rewrite demparams, for instance.\n subst = {v: var_renamer for v in self.enumvars}\n new_clauses = [self.factory.rewrite_lhs(cl, subst)\n if cl.kind is Clause.KIND_ENUM else cl\n for cl in new_clauses]\n \n # Insert each new condition clause immediately after both\n # equated variables have been seen.\n # For each clause in new_join, pull in the applicable cond\n # clauses and delete them from the new_conds list.\n new_clauses_with_conds = []\n seenvars = set(boundvars)\n for cl in new_clauses:\n new_clauses_with_conds.append(cl)\n seenvars.update(cl.enumvars)\n \n for condcl in list(new_conds):\n lhs, rhs = condcl.eqvars\n if lhs in seenvars and rhs in seenvars:\n new_conds.remove(condcl)\n new_clauses_with_conds.append(condcl)\n assert len(new_conds) == 0\n \n return self._replace(clauses=new_clauses_with_conds)\n \n def elim_wildcards(self):\n \"\"\"Opposite of make_wildcards(). Produce a semantically\n equivalent join in which wildcards have been replaced by\n fresh variables.\n \"\"\"\n fresh_names = L.NameGenerator(fmt='_v{}', counter=1)\n subst = {'_': lambda _: fresh_names.next()}\n \n new_clauses = [self.factory.rewrite_lhs(cl, subst)\n if cl.kind is Clause.KIND_ENUM else cl\n for cl in self.clauses]\n return self._replace(clauses=new_clauses)\n \n \n def get_maint_joins(self, elem, rel, op, prefix, *,\n disjoint_strat):\n \"\"\"Derive maintenance joins for when relation rel is updated\n by a single-element change of elem. 
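One maintenance join is emitted per clause over rel, with that occurrence bound to elem as the delta clause. 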
Apply a prefix to these\n joins.\n \n If this join is not robust, AssertionError is raised.\n \n disjoint_strat controls what strategy is used to ensure\n maintenance joins are disjoint.\n \n 'das': No strategy; joins are not necessarily disjoint\n 'sub': Subtractive clauses are used for clauses over\n rel to the right of the delta clause\n 'aug': Augmented clauses are used for clauses over\n rel to the right of the delta clause\n \"\"\"\n assert disjoint_strat in ['das', 'sub', 'aug']\n augmented = disjoint_strat == 'aug'\n \n for cl in self.clauses:\n if not (cl.robust and cl.inc_safe):\n raise AssertionError('Cannot incrementally maintain '\n 'join with fragile clause: ' + str(cl))\n \n clauses = list(self.prefix_enumvars(prefix).clauses)\n \n maint_joins = []\n for i, cl in enumerate(clauses):\n if cl.enumrel == rel:\n # Emit a join with this clause as the delta clause.\n delta = self.factory.bind(cl, elem, augmented=augmented)\n prev_clauses = clauses[:i]\n succ_clauses = clauses[i+1:]\n \n if disjoint_strat in ['sub', 'aug']:\n rewriter = {'sub': self.factory.subtract,\n 'aug': self.factory.augment}[disjoint_strat]\n succ_clauses = \\\n [rewriter(cl, elem) if cl.enumrel == rel else cl\n for cl in succ_clauses]\n \n new_clauses = prev_clauses + [delta] + succ_clauses\n join = self._replace(clauses=new_clauses)\n join = join._replace(delta=DeltaInfo(\n rel, elem, cl.enumlhs, op))\n maint_joins.append(join)\n \n return maint_joins\n \n def get_ordering(self, init_bounds, *, orderer=None):\n \"\"\"Get a clause ordering for a join. init_bounds is a sequence\n of the initially bound variables. orderer is the AsymptoticOrderer\n instance to use; if not provided, one will be created.\n \"\"\"\n if orderer is None:\n orderer = AsymptoticOrderer()\n \n return orderer.get_order(enumerate(self.clauses), init_bounds)\n \n def get_code(self, init_bounds, body, *, orderer=None,\n augmented):\n \"\"\"Make code for executing body once for each tuple in the\n join. init_bounds and orderer are as for get_ordering().\n \"\"\"\n ordering = self.get_ordering(init_bounds, orderer=orderer)\n clauses = [cl for _i, cl, _bindenv in ordering]\n \n # TODO: The work of maintaining a bindenv should be refactored\n # into the ordering logic. 
In fact, it's already there, just\n # not exposed.\n \n bindenvs = [set(init_bounds)]\n for cl in clauses:\n bindenvs.append(set(bindenvs[-1]).union(cl.enumvars))\n \n code = body\n for bindenv, cl in reversed(list(zip(bindenvs, clauses))):\n code = cl.get_code(bindenv, code)\n \n return code\n" }, { "alpha_fraction": 0.4172658622264862, "alphanum_fraction": 0.52972412109375, "avg_line_length": 51.02933883666992, "blob_id": "acca1fe7f1a0d6e7b146ee3327c440702adb579f", "content_id": "0fbefb7deeb51493e8fe2481a28c22244689cbfe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 21279, "license_type": "no_license", "max_line_length": 252, "num_lines": 409, "path": "/incoq/tests/programs/deminc/tup/inc_out.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from incoq.runtime import *\n# Comp1 := {(a, e) : _ in _U_Comp1, (a, _tup1) in R, (_tup1, b2, _) in _TUP2, (b2, _tup2) in R, (_tup2, _, e) in _TUP2}\n# Comp1_T_tup1 := {_tup1 : (a, _tup1) in R}\n# Comp1_d_TUP21 := {(_tup1, b2, _v1) : _tup1 in Comp1_T_tup1, (_tup1, b2, _v1) in _TUP2}\n# Comp1_Tb21 := {b2 : (_tup1, b2, _) in Comp1_d_TUP21}\n# Comp1_dR2 := {(b2, _tup2) : b2 in Comp1_Tb21, (b2, _tup2) in R}\n# Comp1_T_tup2 := {_tup2 : (b2, _tup2) in Comp1_dR2}\n# Comp1_d_TUP22 := {(_tup2, _v1, e) : _tup2 in Comp1_T_tup2, (_tup2, _v1, e) in _TUP2}\n_m_Comp1_out = Map()\ndef _maint__m_Comp1_out_add(_e):\n (v41_1, v41_2) = _e\n if (v41_1 not in _m_Comp1_out):\n _m_Comp1_out[v41_1] = set()\n _m_Comp1_out[v41_1].add(v41_2)\n\ndef _maint__m_Comp1_out_remove(_e):\n (v42_1, v42_2) = _e\n _m_Comp1_out[v42_1].remove(v42_2)\n if (len(_m_Comp1_out[v42_1]) == 0):\n del _m_Comp1_out[v42_1]\n\n_m_Comp1_dR2_in = Map()\ndef _maint__m_Comp1_dR2_in_add(_e):\n (v39_1, v39_2) = _e\n if (v39_2 not in _m_Comp1_dR2_in):\n _m_Comp1_dR2_in[v39_2] = set()\n _m_Comp1_dR2_in[v39_2].add(v39_1)\n\ndef _maint__m_Comp1_dR2_in_remove(_e):\n (v40_1, v40_2) = _e\n _m_Comp1_dR2_in[v40_2].remove(v40_1)\n if (len(_m_Comp1_dR2_in[v40_2]) == 0):\n del _m_Comp1_dR2_in[v40_2]\n\n_m_Comp1_d_TUP22_bwb = Map()\ndef _maint__m_Comp1_d_TUP22_bwb_add(_e):\n (v37_1, v37_2, v37_3) = _e\n if ((v37_1, v37_3) not in _m_Comp1_d_TUP22_bwb):\n _m_Comp1_d_TUP22_bwb[(v37_1, v37_3)] = RCSet()\n if (() not in _m_Comp1_d_TUP22_bwb[(v37_1, v37_3)]):\n _m_Comp1_d_TUP22_bwb[(v37_1, v37_3)].add(())\n else:\n _m_Comp1_d_TUP22_bwb[(v37_1, v37_3)].incref(())\n\ndef _maint__m_Comp1_d_TUP22_bwb_remove(_e):\n (v38_1, v38_2, v38_3) = _e\n if (_m_Comp1_d_TUP22_bwb[(v38_1, v38_3)].getref(()) == 1):\n _m_Comp1_d_TUP22_bwb[(v38_1, v38_3)].remove(())\n else:\n _m_Comp1_d_TUP22_bwb[(v38_1, v38_3)].decref(())\n if (len(_m_Comp1_d_TUP22_bwb[(v38_1, v38_3)]) == 0):\n del _m_Comp1_d_TUP22_bwb[(v38_1, v38_3)]\n\n_m_R_in = Map()\ndef _maint__m_R_in_add(_e):\n (v35_1, v35_2) = _e\n if (v35_2 not in _m_R_in):\n _m_R_in[v35_2] = set()\n _m_R_in[v35_2].add(v35_1)\n\n_m_Comp1_d_TUP21_ubw = Map()\ndef _maint__m_Comp1_d_TUP21_ubw_add(_e):\n (v33_1, v33_2, v33_3) = _e\n if (v33_2 not in _m_Comp1_d_TUP21_ubw):\n _m_Comp1_d_TUP21_ubw[v33_2] = RCSet()\n if (v33_1 not in _m_Comp1_d_TUP21_ubw[v33_2]):\n _m_Comp1_d_TUP21_ubw[v33_2].add(v33_1)\n else:\n _m_Comp1_d_TUP21_ubw[v33_2].incref(v33_1)\n\ndef _maint__m_Comp1_d_TUP21_ubw_remove(_e):\n (v34_1, v34_2, v34_3) = _e\n if (_m_Comp1_d_TUP21_ubw[v34_2].getref(v34_1) == 1):\n _m_Comp1_d_TUP21_ubw[v34_2].remove(v34_1)\n else:\n _m_Comp1_d_TUP21_ubw[v34_2].decref(v34_1)\n if (len(_m_Comp1_d_TUP21_ubw[v34_2]) == 0):\n del 
_m_Comp1_d_TUP21_ubw[v34_2]\n\n_m_Comp1_dR2_out = Map()\ndef _maint__m_Comp1_dR2_out_add(_e):\n (v31_1, v31_2) = _e\n if (v31_1 not in _m_Comp1_dR2_out):\n _m_Comp1_dR2_out[v31_1] = set()\n _m_Comp1_dR2_out[v31_1].add(v31_2)\n\ndef _maint__m_Comp1_dR2_out_remove(_e):\n (v32_1, v32_2) = _e\n _m_Comp1_dR2_out[v32_1].remove(v32_2)\n if (len(_m_Comp1_dR2_out[v32_1]) == 0):\n del _m_Comp1_dR2_out[v32_1]\n\n_m__U_Comp1_w = Map()\ndef _maint__m__U_Comp1_w_add(_e):\n if (() not in _m__U_Comp1_w):\n _m__U_Comp1_w[()] = RCSet()\n if (() not in _m__U_Comp1_w[()]):\n _m__U_Comp1_w[()].add(())\n else:\n _m__U_Comp1_w[()].incref(())\n\ndef _maint__m__U_Comp1_w_remove(_e):\n if (_m__U_Comp1_w[()].getref(()) == 1):\n _m__U_Comp1_w[()].remove(())\n else:\n _m__U_Comp1_w[()].decref(())\n if (len(_m__U_Comp1_w[()]) == 0):\n del _m__U_Comp1_w[()]\n\n_m_Comp1_d_TUP21_bbw = Map()\ndef _maint__m_Comp1_d_TUP21_bbw_add(_e):\n (v27_1, v27_2, v27_3) = _e\n if ((v27_1, v27_2) not in _m_Comp1_d_TUP21_bbw):\n _m_Comp1_d_TUP21_bbw[(v27_1, v27_2)] = RCSet()\n if (() not in _m_Comp1_d_TUP21_bbw[(v27_1, v27_2)]):\n _m_Comp1_d_TUP21_bbw[(v27_1, v27_2)].add(())\n else:\n _m_Comp1_d_TUP21_bbw[(v27_1, v27_2)].incref(())\n\ndef _maint__m_Comp1_d_TUP21_bbw_remove(_e):\n (v28_1, v28_2, v28_3) = _e\n if (_m_Comp1_d_TUP21_bbw[(v28_1, v28_2)].getref(()) == 1):\n _m_Comp1_d_TUP21_bbw[(v28_1, v28_2)].remove(())\n else:\n _m_Comp1_d_TUP21_bbw[(v28_1, v28_2)].decref(())\n if (len(_m_Comp1_d_TUP21_bbw[(v28_1, v28_2)]) == 0):\n del _m_Comp1_d_TUP21_bbw[(v28_1, v28_2)]\n\n_m_R_out = Map()\ndef _maint__m_R_out_add(_e):\n (v25_1, v25_2) = _e\n if (v25_1 not in _m_R_out):\n _m_R_out[v25_1] = set()\n _m_R_out[v25_1].add(v25_2)\n\ndef _maint_Comp1_d_TUP22_Comp1_T_tup2_add(_e):\n # Iterate {(v21__tup2, v21__v1, v21_e) : v21__tup2 in deltamatch(Comp1_T_tup2, 'b', _e, 1), (v21__tup2, v21__v1, v21_e) in _TUP2}\n v21__tup2 = _e\n if (isinstance(v21__tup2, tuple) and (len(v21__tup2) == 2)):\n for (v21__v1, v21_e) in setmatch({(v21__tup2, v21__tup2[0], v21__tup2[1])}, 'buu', v21__tup2):\n # Begin maint _m_Comp1_d_TUP22_bwb after \"Comp1_d_TUP22.add((v21__tup2, v21__v1, v21_e))\"\n _maint__m_Comp1_d_TUP22_bwb_add((v21__tup2, v21__v1, v21_e))\n # End maint _m_Comp1_d_TUP22_bwb after \"Comp1_d_TUP22.add((v21__tup2, v21__v1, v21_e))\"\n\ndef _maint_Comp1_d_TUP22_Comp1_T_tup2_remove(_e):\n # Iterate {(v22__tup2, v22__v1, v22_e) : v22__tup2 in deltamatch(Comp1_T_tup2, 'b', _e, 1), (v22__tup2, v22__v1, v22_e) in _TUP2}\n v22__tup2 = _e\n if (isinstance(v22__tup2, tuple) and (len(v22__tup2) == 2)):\n for (v22__v1, v22_e) in setmatch({(v22__tup2, v22__tup2[0], v22__tup2[1])}, 'buu', v22__tup2):\n # Begin maint _m_Comp1_d_TUP22_bwb before \"Comp1_d_TUP22.remove((v22__tup2, v22__v1, v22_e))\"\n _maint__m_Comp1_d_TUP22_bwb_remove((v22__tup2, v22__v1, v22_e))\n # End maint _m_Comp1_d_TUP22_bwb before \"Comp1_d_TUP22.remove((v22__tup2, v22__v1, v22_e))\"\n\nComp1_T_tup2 = RCSet()\ndef _maint_Comp1_T_tup2_Comp1_dR2_add(_e):\n # Iterate {(v19_b2, v19__tup2) : (v19_b2, v19__tup2) in deltamatch(Comp1_dR2, 'bb', _e, 1)}\n (v19_b2, v19__tup2) = _e\n if (v19__tup2 not in Comp1_T_tup2):\n Comp1_T_tup2.add(v19__tup2)\n # Begin maint Comp1_d_TUP22 after \"Comp1_T_tup2.add(v19__tup2)\"\n _maint_Comp1_d_TUP22_Comp1_T_tup2_add(v19__tup2)\n # End maint Comp1_d_TUP22 after \"Comp1_T_tup2.add(v19__tup2)\"\n else:\n Comp1_T_tup2.incref(v19__tup2)\n\ndef _maint_Comp1_T_tup2_Comp1_dR2_remove(_e):\n # Iterate {(v20_b2, v20__tup2) : (v20_b2, v20__tup2) in deltamatch(Comp1_dR2, 'bb', 
_e, 1)}\n (v20_b2, v20__tup2) = _e\n if (Comp1_T_tup2.getref(v20__tup2) == 1):\n # Begin maint Comp1_d_TUP22 before \"Comp1_T_tup2.remove(v20__tup2)\"\n _maint_Comp1_d_TUP22_Comp1_T_tup2_remove(v20__tup2)\n # End maint Comp1_d_TUP22 before \"Comp1_T_tup2.remove(v20__tup2)\"\n Comp1_T_tup2.remove(v20__tup2)\n else:\n Comp1_T_tup2.decref(v20__tup2)\n\nComp1_dR2 = RCSet()\ndef _maint_Comp1_dR2_Comp1_Tb21_add(_e):\n # Iterate {(v15_b2, v15__tup2) : v15_b2 in deltamatch(Comp1_Tb21, 'b', _e, 1), (v15_b2, v15__tup2) in R}\n v15_b2 = _e\n for v15__tup2 in (_m_R_out[v15_b2] if (v15_b2 in _m_R_out) else set()):\n Comp1_dR2.add((v15_b2, v15__tup2))\n # Begin maint _m_Comp1_dR2_in after \"Comp1_dR2.add((v15_b2, v15__tup2))\"\n _maint__m_Comp1_dR2_in_add((v15_b2, v15__tup2))\n # End maint _m_Comp1_dR2_in after \"Comp1_dR2.add((v15_b2, v15__tup2))\"\n # Begin maint _m_Comp1_dR2_out after \"Comp1_dR2.add((v15_b2, v15__tup2))\"\n _maint__m_Comp1_dR2_out_add((v15_b2, v15__tup2))\n # End maint _m_Comp1_dR2_out after \"Comp1_dR2.add((v15_b2, v15__tup2))\"\n # Begin maint Comp1_T_tup2 after \"Comp1_dR2.add((v15_b2, v15__tup2))\"\n _maint_Comp1_T_tup2_Comp1_dR2_add((v15_b2, v15__tup2))\n # End maint Comp1_T_tup2 after \"Comp1_dR2.add((v15_b2, v15__tup2))\"\n\ndef _maint_Comp1_dR2_Comp1_Tb21_remove(_e):\n # Iterate {(v16_b2, v16__tup2) : v16_b2 in deltamatch(Comp1_Tb21, 'b', _e, 1), (v16_b2, v16__tup2) in R}\n v16_b2 = _e\n for v16__tup2 in (_m_R_out[v16_b2] if (v16_b2 in _m_R_out) else set()):\n # Begin maint Comp1_T_tup2 before \"Comp1_dR2.remove((v16_b2, v16__tup2))\"\n _maint_Comp1_T_tup2_Comp1_dR2_remove((v16_b2, v16__tup2))\n # End maint Comp1_T_tup2 before \"Comp1_dR2.remove((v16_b2, v16__tup2))\"\n # Begin maint _m_Comp1_dR2_out before \"Comp1_dR2.remove((v16_b2, v16__tup2))\"\n _maint__m_Comp1_dR2_out_remove((v16_b2, v16__tup2))\n # End maint _m_Comp1_dR2_out before \"Comp1_dR2.remove((v16_b2, v16__tup2))\"\n # Begin maint _m_Comp1_dR2_in before \"Comp1_dR2.remove((v16_b2, v16__tup2))\"\n _maint__m_Comp1_dR2_in_remove((v16_b2, v16__tup2))\n # End maint _m_Comp1_dR2_in before \"Comp1_dR2.remove((v16_b2, v16__tup2))\"\n Comp1_dR2.remove((v16_b2, v16__tup2))\n\ndef _maint_Comp1_dR2_R_add(_e):\n # Iterate {(v17_b2, v17__tup2) : v17_b2 in Comp1_Tb21, (v17_b2, v17__tup2) in deltamatch(R, 'bb', _e, 1)}\n (v17_b2, v17__tup2) = _e\n if (v17_b2 in Comp1_Tb21):\n Comp1_dR2.add((v17_b2, v17__tup2))\n # Begin maint _m_Comp1_dR2_in after \"Comp1_dR2.add((v17_b2, v17__tup2))\"\n _maint__m_Comp1_dR2_in_add((v17_b2, v17__tup2))\n # End maint _m_Comp1_dR2_in after \"Comp1_dR2.add((v17_b2, v17__tup2))\"\n # Begin maint _m_Comp1_dR2_out after \"Comp1_dR2.add((v17_b2, v17__tup2))\"\n _maint__m_Comp1_dR2_out_add((v17_b2, v17__tup2))\n # End maint _m_Comp1_dR2_out after \"Comp1_dR2.add((v17_b2, v17__tup2))\"\n # Begin maint Comp1_T_tup2 after \"Comp1_dR2.add((v17_b2, v17__tup2))\"\n _maint_Comp1_T_tup2_Comp1_dR2_add((v17_b2, v17__tup2))\n # End maint Comp1_T_tup2 after \"Comp1_dR2.add((v17_b2, v17__tup2))\"\n\nComp1_Tb21 = RCSet()\ndef _maint_Comp1_Tb21_Comp1_d_TUP21_add(_e):\n # Iterate {(v13__tup1, v13_b2) : (v13__tup1, v13_b2, _) in deltamatch(Comp1_d_TUP21, 'bbw', _e, 1)}\n for (v13__tup1, v13_b2) in setmatch(({_e} if ((_m_Comp1_d_TUP21_bbw[(_e[0], _e[1])] if ((_e[0], _e[1]) in _m_Comp1_d_TUP21_bbw) else RCSet()).getref(()) == 1) else {}), 'uuw', ()):\n if (v13_b2 not in Comp1_Tb21):\n Comp1_Tb21.add(v13_b2)\n # Begin maint Comp1_dR2 after \"Comp1_Tb21.add(v13_b2)\"\n _maint_Comp1_dR2_Comp1_Tb21_add(v13_b2)\n # End maint 
Comp1_dR2 after \"Comp1_Tb21.add(v13_b2)\"\n else:\n Comp1_Tb21.incref(v13_b2)\n\ndef _maint_Comp1_Tb21_Comp1_d_TUP21_remove(_e):\n # Iterate {(v14__tup1, v14_b2) : (v14__tup1, v14_b2, _) in deltamatch(Comp1_d_TUP21, 'bbw', _e, 1)}\n for (v14__tup1, v14_b2) in setmatch(({_e} if ((_m_Comp1_d_TUP21_bbw[(_e[0], _e[1])] if ((_e[0], _e[1]) in _m_Comp1_d_TUP21_bbw) else RCSet()).getref(()) == 1) else {}), 'uuw', ()):\n if (Comp1_Tb21.getref(v14_b2) == 1):\n # Begin maint Comp1_dR2 before \"Comp1_Tb21.remove(v14_b2)\"\n _maint_Comp1_dR2_Comp1_Tb21_remove(v14_b2)\n # End maint Comp1_dR2 before \"Comp1_Tb21.remove(v14_b2)\"\n Comp1_Tb21.remove(v14_b2)\n else:\n Comp1_Tb21.decref(v14_b2)\n\ndef _maint_Comp1_d_TUP21_Comp1_T_tup1_add(_e):\n # Iterate {(v9__tup1, v9_b2, v9__v1) : v9__tup1 in deltamatch(Comp1_T_tup1, 'b', _e, 1), (v9__tup1, v9_b2, v9__v1) in _TUP2}\n v9__tup1 = _e\n if (isinstance(v9__tup1, tuple) and (len(v9__tup1) == 2)):\n for (v9_b2, v9__v1) in setmatch({(v9__tup1, v9__tup1[0], v9__tup1[1])}, 'buu', v9__tup1):\n # Begin maint _m_Comp1_d_TUP21_ubw after \"Comp1_d_TUP21.add((v9__tup1, v9_b2, v9__v1))\"\n _maint__m_Comp1_d_TUP21_ubw_add((v9__tup1, v9_b2, v9__v1))\n # End maint _m_Comp1_d_TUP21_ubw after \"Comp1_d_TUP21.add((v9__tup1, v9_b2, v9__v1))\"\n # Begin maint _m_Comp1_d_TUP21_bbw after \"Comp1_d_TUP21.add((v9__tup1, v9_b2, v9__v1))\"\n _maint__m_Comp1_d_TUP21_bbw_add((v9__tup1, v9_b2, v9__v1))\n # End maint _m_Comp1_d_TUP21_bbw after \"Comp1_d_TUP21.add((v9__tup1, v9_b2, v9__v1))\"\n # Begin maint Comp1_Tb21 after \"Comp1_d_TUP21.add((v9__tup1, v9_b2, v9__v1))\"\n _maint_Comp1_Tb21_Comp1_d_TUP21_add((v9__tup1, v9_b2, v9__v1))\n # End maint Comp1_Tb21 after \"Comp1_d_TUP21.add((v9__tup1, v9_b2, v9__v1))\"\n\ndef _maint_Comp1_d_TUP21_Comp1_T_tup1_remove(_e):\n # Iterate {(v10__tup1, v10_b2, v10__v1) : v10__tup1 in deltamatch(Comp1_T_tup1, 'b', _e, 1), (v10__tup1, v10_b2, v10__v1) in _TUP2}\n v10__tup1 = _e\n if (isinstance(v10__tup1, tuple) and (len(v10__tup1) == 2)):\n for (v10_b2, v10__v1) in setmatch({(v10__tup1, v10__tup1[0], v10__tup1[1])}, 'buu', v10__tup1):\n # Begin maint Comp1_Tb21 before \"Comp1_d_TUP21.remove((v10__tup1, v10_b2, v10__v1))\"\n _maint_Comp1_Tb21_Comp1_d_TUP21_remove((v10__tup1, v10_b2, v10__v1))\n # End maint Comp1_Tb21 before \"Comp1_d_TUP21.remove((v10__tup1, v10_b2, v10__v1))\"\n # Begin maint _m_Comp1_d_TUP21_bbw before \"Comp1_d_TUP21.remove((v10__tup1, v10_b2, v10__v1))\"\n _maint__m_Comp1_d_TUP21_bbw_remove((v10__tup1, v10_b2, v10__v1))\n # End maint _m_Comp1_d_TUP21_bbw before \"Comp1_d_TUP21.remove((v10__tup1, v10_b2, v10__v1))\"\n # Begin maint _m_Comp1_d_TUP21_ubw before \"Comp1_d_TUP21.remove((v10__tup1, v10_b2, v10__v1))\"\n _maint__m_Comp1_d_TUP21_ubw_remove((v10__tup1, v10_b2, v10__v1))\n # End maint _m_Comp1_d_TUP21_ubw before \"Comp1_d_TUP21.remove((v10__tup1, v10_b2, v10__v1))\"\n\nComp1_T_tup1 = RCSet()\ndef _maint_Comp1_T_tup1_R_add(_e):\n # Iterate {(v7_a, v7__tup1) : (v7_a, v7__tup1) in deltamatch(R, 'bb', _e, 1)}\n (v7_a, v7__tup1) = _e\n if (v7__tup1 not in Comp1_T_tup1):\n Comp1_T_tup1.add(v7__tup1)\n # Begin maint Comp1_d_TUP21 after \"Comp1_T_tup1.add(v7__tup1)\"\n _maint_Comp1_d_TUP21_Comp1_T_tup1_add(v7__tup1)\n # End maint Comp1_d_TUP21 after \"Comp1_T_tup1.add(v7__tup1)\"\n else:\n Comp1_T_tup1.incref(v7__tup1)\n\nComp1 = RCSet()\ndef _maint_Comp1__U_Comp1_add(_e):\n # Iterate {(v1_a, v1__tup1, v1_b2, v1__tup2, v1_e) : _ in deltamatch(_U_Comp1, 'w', _e, 1), (v1_a, v1__tup1) in R, (v1__tup1, v1_b2, _) in _TUP2, (v1_b2, 
v1__tup2) in Comp1_dR2, (v1__tup2, _, v1_e) in _TUP2}\n for _ in setmatch(({_e} if ((_m__U_Comp1_w[()] if (() in _m__U_Comp1_w) else RCSet()).getref(()) == 1) else {}), 'w', ()):\n for (v1_a, v1__tup1) in R:\n if (isinstance(v1__tup1, tuple) and (len(v1__tup1) == 2)):\n for v1_b2 in setmatch({(v1__tup1, v1__tup1[0], v1__tup1[1])}, 'buw', v1__tup1):\n for v1__tup2 in (_m_Comp1_dR2_out[v1_b2] if (v1_b2 in _m_Comp1_dR2_out) else set()):\n if (isinstance(v1__tup2, tuple) and (len(v1__tup2) == 2)):\n for v1_e in setmatch({(v1__tup2, v1__tup2[0], v1__tup2[1])}, 'bwu', v1__tup2):\n if ((v1_a, v1_e) not in Comp1):\n Comp1.add((v1_a, v1_e))\n # Begin maint _m_Comp1_out after \"Comp1.add((v1_a, v1_e))\"\n _maint__m_Comp1_out_add((v1_a, v1_e))\n # End maint _m_Comp1_out after \"Comp1.add((v1_a, v1_e))\"\n else:\n Comp1.incref((v1_a, v1_e))\n\ndef _maint_Comp1__U_Comp1_remove(_e):\n # Iterate {(v2_a, v2__tup1, v2_b2, v2__tup2, v2_e) : _ in deltamatch(_U_Comp1, 'w', _e, 1), (v2_a, v2__tup1) in R, (v2__tup1, v2_b2, _) in _TUP2, (v2_b2, v2__tup2) in Comp1_dR2, (v2__tup2, _, v2_e) in _TUP2}\n for _ in setmatch(({_e} if ((_m__U_Comp1_w[()] if (() in _m__U_Comp1_w) else RCSet()).getref(()) == 1) else {}), 'w', ()):\n for (v2_a, v2__tup1) in R:\n if (isinstance(v2__tup1, tuple) and (len(v2__tup1) == 2)):\n for v2_b2 in setmatch({(v2__tup1, v2__tup1[0], v2__tup1[1])}, 'buw', v2__tup1):\n for v2__tup2 in (_m_Comp1_dR2_out[v2_b2] if (v2_b2 in _m_Comp1_dR2_out) else set()):\n if (isinstance(v2__tup2, tuple) and (len(v2__tup2) == 2)):\n for v2_e in setmatch({(v2__tup2, v2__tup2[0], v2__tup2[1])}, 'bwu', v2__tup2):\n if (Comp1.getref((v2_a, v2_e)) == 1):\n # Begin maint _m_Comp1_out before \"Comp1.remove((v2_a, v2_e))\"\n _maint__m_Comp1_out_remove((v2_a, v2_e))\n # End maint _m_Comp1_out before \"Comp1.remove((v2_a, v2_e))\"\n Comp1.remove((v2_a, v2_e))\n else:\n Comp1.decref((v2_a, v2_e))\n\ndef _maint_Comp1_R_add(_e):\n v3_DAS = set()\n # Iterate {(v3_a, v3__tup1, v3_b2, v3__tup2, v3_e) : _ in _U_Comp1, (v3_a, v3__tup1) in deltamatch(R, 'bb', _e, 1), (v3__tup1, v3_b2, _) in _TUP2, (v3_b2, v3__tup2) in Comp1_dR2, (v3__tup2, _, v3_e) in _TUP2}\n (v3_a, v3__tup1) = _e\n if (isinstance(v3__tup1, tuple) and (len(v3__tup1) == 2)):\n for v3_b2 in setmatch({(v3__tup1, v3__tup1[0], v3__tup1[1])}, 'buw', v3__tup1):\n for v3__tup2 in (_m_Comp1_dR2_out[v3_b2] if (v3_b2 in _m_Comp1_dR2_out) else set()):\n if (isinstance(v3__tup2, tuple) and (len(v3__tup2) == 2)):\n for v3_e in setmatch({(v3__tup2, v3__tup2[0], v3__tup2[1])}, 'bwu', v3__tup2):\n for _ in (_m__U_Comp1_w[()] if (() in _m__U_Comp1_w) else RCSet()):\n if ((v3_a, v3__tup1, v3_b2, v3__tup2, v3_e) not in v3_DAS):\n v3_DAS.add((v3_a, v3__tup1, v3_b2, v3__tup2, v3_e))\n # Iterate {(v3_a, v3__tup1, v3_b2, v3__tup2, v3_e) : _ in _U_Comp1, (v3_a, v3__tup1) in R, (v3__tup1, v3_b2, _) in Comp1_d_TUP21, (v3_b2, v3__tup2) in deltamatch(Comp1_dR2, 'bb', _e, 1), (v3_b2, v3__tup2) in Comp1_dR2, (v3__tup2, _, v3_e) in _TUP2}\n (v3_b2, v3__tup2) = _e\n if ((v3_b2, v3__tup2) in Comp1_dR2):\n if (isinstance(v3__tup2, tuple) and (len(v3__tup2) == 2)):\n for v3_e in setmatch({(v3__tup2, v3__tup2[0], v3__tup2[1])}, 'bwu', v3__tup2):\n for v3__tup1 in (_m_Comp1_d_TUP21_ubw[v3_b2] if (v3_b2 in _m_Comp1_d_TUP21_ubw) else RCSet()):\n for v3_a in (_m_R_in[v3__tup1] if (v3__tup1 in _m_R_in) else set()):\n for _ in (_m__U_Comp1_w[()] if (() in _m__U_Comp1_w) else RCSet()):\n if ((v3_a, v3__tup1, v3_b2, v3__tup2, v3_e) not in v3_DAS):\n v3_DAS.add((v3_a, v3__tup1, v3_b2, v3__tup2, v3_e))\n 
for (v3_a, v3__tup1, v3_b2, v3__tup2, v3_e) in v3_DAS:\n if ((v3_a, v3_e) not in Comp1):\n Comp1.add((v3_a, v3_e))\n # Begin maint _m_Comp1_out after \"Comp1.add((v3_a, v3_e))\"\n _maint__m_Comp1_out_add((v3_a, v3_e))\n # End maint _m_Comp1_out after \"Comp1.add((v3_a, v3_e))\"\n else:\n Comp1.incref((v3_a, v3_e))\n del v3_DAS\n\n_U_Comp1 = RCSet()\n_UEXT_Comp1 = Set()\ndef demand_Comp1():\n '{(a, e) : _ in _U_Comp1, (a, _tup1) in R, (_tup1, b2, _) in _TUP2, (b2, _tup2) in R, (_tup2, _, e) in _TUP2}'\n if (() not in _U_Comp1):\n _U_Comp1.add(())\n # Begin maint _m__U_Comp1_w after \"_U_Comp1.add(())\"\n _maint__m__U_Comp1_w_add(())\n # End maint _m__U_Comp1_w after \"_U_Comp1.add(())\"\n # Begin maint Comp1 after \"_U_Comp1.add(())\"\n _maint_Comp1__U_Comp1_add(())\n # End maint Comp1 after \"_U_Comp1.add(())\"\n else:\n _U_Comp1.incref(())\n\ndef undemand_Comp1():\n '{(a, e) : _ in _U_Comp1, (a, _tup1) in R, (_tup1, b2, _) in _TUP2, (b2, _tup2) in R, (_tup2, _, e) in _TUP2}'\n if (_U_Comp1.getref(()) == 1):\n # Begin maint Comp1 before \"_U_Comp1.remove(())\"\n _maint_Comp1__U_Comp1_remove(())\n # End maint Comp1 before \"_U_Comp1.remove(())\"\n # Begin maint _m__U_Comp1_w before \"_U_Comp1.remove(())\"\n _maint__m__U_Comp1_w_remove(())\n # End maint _m__U_Comp1_w before \"_U_Comp1.remove(())\"\n _U_Comp1.remove(())\n else:\n _U_Comp1.decref(())\n\ndef query_Comp1():\n '{(a, e) : _ in _U_Comp1, (a, _tup1) in R, (_tup1, b2, _) in _TUP2, (b2, _tup2) in R, (_tup2, _, e) in _TUP2}'\n if (() not in _UEXT_Comp1):\n _UEXT_Comp1.add(())\n demand_Comp1()\n return True\n\nR = Set()\nfor (x, y) in [(1, (2, 3)), (2, (3, 4)), (3, (4, 5))]:\n R.add((x, y))\n # Begin maint _m_R_in after \"R.add((x, y))\"\n _maint__m_R_in_add((x, y))\n # End maint _m_R_in after \"R.add((x, y))\"\n # Begin maint _m_R_out after \"R.add((x, y))\"\n _maint__m_R_out_add((x, y))\n # End maint _m_R_out after \"R.add((x, y))\"\n # Begin maint Comp1_dR2 after \"R.add((x, y))\"\n _maint_Comp1_dR2_R_add((x, y))\n # End maint Comp1_dR2 after \"R.add((x, y))\"\n # Begin maint Comp1_T_tup1 after \"R.add((x, y))\"\n _maint_Comp1_T_tup1_R_add((x, y))\n # End maint Comp1_T_tup1 after \"R.add((x, y))\"\n # Begin maint Comp1 after \"R.add((x, y))\"\n _maint_Comp1_R_add((x, y))\n # End maint Comp1 after \"R.add((x, y))\"\na = 1\nprint(sorted((query_Comp1() and (_m_Comp1_out[a] if (a in _m_Comp1_out) else set()))))" }, { "alpha_fraction": 0.6233852505683899, "alphanum_fraction": 0.6296787261962891, "avg_line_length": 26.953702926635742, "blob_id": "54774f25c2c505dcbeaac8554f7c6ca0c280728c", "content_id": "13ad290a5fb5ac5473c3c21d3fdb3bbb4b500b1a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3019, "license_type": "no_license", "max_line_length": 102, "num_lines": 108, "path": "/experiments/distalgo/distalgo_bridge.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Wrapper for launching DistAlgo programs.\n\nThis module provides a launch() function that invokes the interpreter\non this module in a subprocess. 
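Results are recovered by scanning the child's standard output for a line of the form \"OUTPUT: <JSON>\". 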
The __main__ routine in turn calls\nthe DistAlgo entry point.\n\"\"\"\n\n\nimport os\nimport json\nimport subprocess\nimport re\nimport configparser\nfrom os.path import join\nfrom types import SimpleNamespace\n\n\nclass DistAlgoError(subprocess.CalledProcessError):\n \n def __str__(self):\n return ('Command {} returned non-zero exit status {}\\n'\n 'stderr output:\\n{}'.format(\n self.cmd, self.returncode, self.output))\n\n\ndef get_config():\n \"\"\"Read config.txt to determine appropriate environment variables\n and paths.\n \"\"\"\n config = configparser.ConfigParser()\n dirname = os.path.dirname(__file__)\n config.read(join(dirname, '../config.txt'))\n pyconf = config['python']\n \n ns = SimpleNamespace()\n \n ns.python34 = pyconf['python34']\n ns.incoq_root = pyconf['INCOQ_ROOT']\n ns.distalgo_path = pyconf['DISTALGO_PATH']\n \n da_exp_dir = os.path.join(ns.incoq_root, 'experiments/distalgo')\n ns.pythonpath = (ns.incoq_root + ';' + ns.distalgo_path + ';' +\n da_exp_dir)\n \n return ns\n\n\ndef parse_output(s):\n \"\"\"Parse a string of standard output text for the \"OUTPUT: <JSON>\"\n line and return the parsed JSON object.\n \"\"\"\n m = re.search(r'^OUTPUT: (.*)', s, re.MULTILINE)\n if m is None:\n return None\n return json.loads(m.group(1))\n\n\ndef launch(config, dafile, incfile, daargs):\n \"\"\"Launch the specified DistAlgo program in a subprocess that\n captures/parses standard output and error. Return a JSON object\n obtained by parsing stdout for a line \"OUTPUT: <JSON>\", where\n <JSON> is JSON-encoded data.\n \"\"\"\n python = config.python34\n dirname = os.path.dirname(__file__)\n \n env = dict(os.environ.items())\n # Don't let python33's standard library paths override\n # python 34's.\n env['PYTHONPATH'] = config.pythonpath\n \n args = [\n python,\n __file__,\n '-i',\n '-m',\n incfile,\n dafile,\n ]\n args.extend(daargs)\n \n child = subprocess.Popen(\n args, bufsize=-1,\n # To debug, comment out this line to make stdout/stderr\n # the same standard out and error streams as the parent.\n # Alternatively (if the process terminates), uncomment\n # the print statements below.\n # In the future, maybe use something like\n # http://stackoverflow.com/questions/375427/non-blocking-read-on-a-subprocess-pipe-in-python\n stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n cwd=dirname,\n env=env,\n universal_newlines=True)\n \n stdout, stderr = child.communicate()\n results = parse_output(stdout)\n# print(stderr)\n# print(stdout)\n \n if child.returncode != 0:\n raise DistAlgoError(child.returncode, args, stderr)\n \n return results\n\n\nif __name__ == '__main__':\n import da\n da.libmain()\n" }, { "alpha_fraction": 0.43117809295654297, "alphanum_fraction": 0.4412931501865387, "avg_line_length": 30.123456954956055, "blob_id": "10457a837254630adb468cd11129f72d7bca76bc", "content_id": "2f61746039a9523210709e8bca7732540ec3fb6f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5042, "license_type": "no_license", "max_line_length": 73, "num_lines": 162, "path": "/incoq/tests/invinc/incast/test_treeconv.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Unit tests for treeconv.py.\"\"\"\n\n\nimport unittest\n\nfrom incoq.compiler.incast.nodes import *\nfrom incoq.compiler.incast.structconv import parse_structast\nfrom incoq.compiler.incast.error import ProgramError\nfrom incoq.compiler.incast.macros import IncMacroProcessor\nfrom incoq.compiler.incast.treeconv import *\n\n\nclass 
TreeconvCase(unittest.TestCase):\n \n def p(self, source, mode=None):\n tree = parse_structast(source, mode=mode)\n tree = IncMacroProcessor.run(tree)\n return tree\n \n def pc(self, source):\n return self.p(source, mode='code')\n \n def pe(self, source):\n return self.p(source, mode='expr')\n \n def test_runtimelib(self):\n tree = self.p('''\n pass\n from incoq.runtime import *\n from foo import *\n from incoq.runtime import *\n ''')\n tree = remove_runtimelib(tree)\n exp_tree = self.p('''\n pass\n from foo import *\n ''')\n self.assertEqual(tree, exp_tree)\n \n tree = add_runtimelib(exp_tree)\n exp_tree = self.p('''\n from incoq.runtime import *\n pass\n from foo import *\n ''')\n self.assertEqual(tree, exp_tree)\n \n def test_options_reader(self):\n tree = self.p('''\n OPTIONS(a = 1, b = 2)\n OPTIONS(c = 3)\n foo\n QUERYOPTIONS('1 + 1', a = 1, b = 2)\n QUERYOPTIONS('2 + 2')\n ''')\n opts = ({'z': 5}, {self.pe('3 + 3'): {'z': 10}})\n tree, (nopts, qopts) = OptionsParser.run(tree, opts)\n \n exp_tree = self.p('foo')\n exp_nopts = {'a': 1, 'b': 2, 'c': 3, 'z': 5}\n exp_qopts = {self.pe('1 + 1'): {'a': 1, 'b': 2},\n self.pe('2 + 2'): {},\n self.pe('3 + 3'): {'z': 10}}\n \n self.assertEqual(tree, exp_tree)\n self.assertEqual(nopts, exp_nopts)\n self.assertEqual(qopts, exp_qopts)\n \n # Make sure we raise errors on redundant options.\n \n tree = self.p('''\n OPTIONS(a = 1)\n OPTIONS(a = 2)\n ''')\n with self.assertRaises(ProgramError):\n OptionsParser.run(tree)\n \n tree = self.p('''\n QUERYOPTIONS('1 + 1', a = 1)\n QUERYOPTIONS('(1 + 1)', b = 2)\n ''')\n with self.assertRaises(ProgramError):\n OptionsParser.run(tree)\n \n def test_infer_params(self):\n tree = self.p('''\n {y for (x, y) in R}\n x = 1\n {y for (x, y) in R}\n {y for (x, y) in {y for (x, y) in R}}\n COMP({y for (x, y) in R}, [y])\n ''')\n tree = infer_params(tree, obj_domain=False)\n \n exp_tree = self.p('''\n COMP({y for (x, y) in R}, [])\n x = 1\n COMP({y for (x, y) in R}, [x], None)\n COMP({y for (x, y) in\n COMP({y for (x, y) in R}, [x], None)},\n [x], None)\n COMP({y for (x, y) in R}, [y], None)\n ''')\n \n self.assertEqual(tree, exp_tree)\n \n def test_attach_qopts(self):\n q1 = '{x for (x, y) in R}'\n q2 = '{y for (x, y) in R if sum({x for (x, y) in R}) > 5}'\n q3 = 'sum({x for (x, y) in R})'\n tree = self.p(q2)\n opts = ({}, {self.pe(q1): {'a': 'b', 'params': ['c']},\n self.pe(q2): {'d': 'e'},\n self.pe(q3): {'f': 'g'},\n self.pe('foo'): {}})\n tree, unused = attach_qopts_info(tree, opts)\n \n exp_tree = self.p('''\n COMP({y for (x, y) in R if sum(\n COMP({x for (x, y) in R}, [c], {'a': 'b'}),\n {'f': 'g'}) > 5},\n None, {'d': 'e'})\n ''')\n exp_unused = {self.pe('foo')}\n \n self.assertEqual(tree, exp_tree)\n self.assertEqual(unused, exp_unused)\n \n def test_expand_maint(self):\n maint_node = Maintenance('Q', 'pass',\n self.pc('print(1)'),\n self.pc('pass'),\n self.pc('print(2)'))\n tree = Module((maint_node,))\n tree = MaintExpander.run(tree)\n \n exp_tree = self.p('''\n Comment('Begin maint Q before \"pass\"')\n print(1)\n Comment('End maint Q before \"pass\"')\n pass\n Comment('Begin maint Q after \"pass\"')\n print(2)\n Comment('End maint Q after \"pass\"')\n ''')\n \n self.assertEqual(tree, exp_tree)\n \n def test_export(self):\n tree = self.p('''\n OPTIONS(v = 'u')\n print(COMP({sum(x, {'c': 'd'}) for x in S}, [S], {'a': 'b'}))\n ''')\n tree = export_program(tree)\n exp_tree = parse_structast('''\n print({sum(x) for x in S})\n ''')\n self.assertEqual(tree, exp_tree)\n\n\nif __name__ == '__main__':\n 
unittest.main()\n" }, { "alpha_fraction": 0.6379597187042236, "alphanum_fraction": 0.6381111145019531, "avg_line_length": 32.882049560546875, "blob_id": "e622a232f393444f4ec1ee87604bafa6e009116c", "content_id": "ea0019e99ac3c3eebcaa830efd921e1bf94c0120", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6607, "license_type": "no_license", "max_line_length": 104, "num_lines": 195, "path": "/experiments/rbac/corerbac/coreRBAC_in.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "# author: annie liu, programmed based on ANSI RBAC.\n\n# Changes by Jon:\n# - de-classified things (methods to functions, fields to globals)\n# - import runtimelib\n# - set() to comprehension syntax\n# - flattened PR to triples\n# - turned set difference operations (-=) into iterated removals\n# - for set difference comprehensions, added membership clauses for\n#   the set being removed from, in order to constrain enumvar domains\n#   (as in section 4 of PEPM)\n# - split some \"and\" comprehension conditions into individual conditions\n# - eliminated the comprehension over parameter \"ars\" in CreateSession\n# - added methods AddOperation and AddObject (no corresponding\n#   deletion methods) \n# - wherever a for loop iterates over a comprehension, create\n#   a copy of the result if it may change during the loop\n# - added CheckAccess_nodemand() for doing a CheckAccess query without\n#   checking and adding to the demand set -- only for use when the\n#   parameters are already known to be demanded\n\nfrom incoq.runtime import *\n\nOBJS = set() \nOPS = set()  # an operation-object pair is called a permission\nUSERS = set()\nROLES = set()\nPR = set()   # PR subset OPS * OBJS * ROLES, for PA in std\nUR = set()   # UR subset USERS * ROLES, for UA in std\nSESSIONS = set()\nSU = set()   # SU subset SESSIONS * USERS\nSR = set()   # SR subset SESSIONS * ROLES\n\n# administrative commands\n\ndef AddUser(user):\n    assert user not in USERS\n    USERS.add(user)\n\ndef DeleteUser(user):\n    assert user in USERS\n    for _user, _r in set({(user,r) for r in ROLES if (user, r) in UR}): # maintain UR\n        UR.remove((_user, _r))\n    for s in set({s for s in SESSIONS if (s,user) in SU}):\n        DeleteSession(user,s)             # maintain sessions\n    USERS.remove(user)                    # delete user last -yl\n\ndef AddRole(role):\n    assert role not in ROLES\n    ROLES.add(role)\n\ndef DeleteRole(role):\n    assert role in ROLES\n    for _op, _obj, _role in set({(op,obj,role) for op in OPS for obj in OBJS if (op, obj, role) in PR}):\n        PR.remove((_op, _obj, _role))\n    for _u, _role in set({(u,role) for u in USERS if (u, role) in UR}): # maintain PR and UR\n        UR.remove((_u, _role))\n    for (s,u) in set({(s,u) for s in SESSIONS for u in USERS\n                      if (s,u) in SU if (s,role) in SR}):\n        DeleteSession(u,s)                # maintain sessions\n    ROLES.remove(role)                    # delete role last -cw\n\ndef AssignUser(user, role):\n    assert user in USERS\n    assert role in ROLES\n    assert (user,role) not in UR\n    UR.add((user,role))\n\ndef DeassignUser(user, role):\n    assert user in USERS\n    assert role in ROLES\n    assert (user,role) in UR\n    for s in set({s for s in SESSIONS \n                  if (s,user) in SU if (s,role) in SR}):\n        DeleteSession(user,s)             # maintain sessions\n    UR.remove((user,role))\n\ndef AddOperation(operation):\n    assert operation not in OPS\n    OPS.add(operation)\n\ndef AddObject(object):\n    assert object not in OBJS\n    OBJS.add(object)\n\ndef GrantPermission(operation, object, role):\n    assert operation in OPS and object in OBJS\n    assert role in ROLES\n    assert (operation,object,role) not in PR 
#+\n PR.add((operation,object,role))\n\ndef RevokePermission(operation, object, role):\n assert operation in OPS and object in OBJS\n assert role in ROLES\n assert (operation,object,role) in PR\n PR.remove((operation,object,role))\n\n# supporting system functions\n\ndef CreateSession(user, session, ars):\n assert user in USERS\n assert session not in SESSIONS\n assert ars.issubset(AssignedRoles(user))\n SESSIONS.add(session) # add first for subset constraints -ag\n SU.add((session,user)) # ok to do in any order if atomic -yl\n # Can't put ars in a comprehension since it's a local var, not a\n # top level relation.\n for r in ars:\n SR.add((session, r))\n\ndef DeleteSession(user, session):\n assert user in USERS\n assert session in SESSIONS\n assert (session,user) in SU\n SU.remove((session,user))\n for _session, _r in set({(session,r) for r in ROLES if (session, r) in SR}): # maintain SR\n SR.remove((_session, _r))\n SESSIONS.remove(session) # maintain SESSIONS\n\ndef AddActiveRole(user, session, role):\n assert user in USERS\n assert session in SESSIONS\n assert role in ROLES\n assert (session,user) in SU\n assert (session,role) not in SR\n assert role in AssignedRoles(user)\n SR.add((session,role))\n\ndef DropActiveRole(user, session, role):\n assert user in USERS\n assert session in SESSIONS\n assert role in ROLES\n assert (session,user) in SU\n assert (session,role) in SR\n SR.remove((session,role))\n\ndef CheckAccess(session, operation, object):\n assert session in SESSIONS\n assert operation in OPS\n assert object in OBJS\n return bool({r for r in ROLES\n if (session,r) in SR\n if (operation,object,r) in PR})\n\ndef CheckAccess_nodemand(session, operation, object):\n assert session in SESSIONS\n assert operation in OPS\n assert object in OBJS\n return bool(NODEMAND({r for r in ROLES\n if (session,r) in SR\n if (operation,object,r) in PR}))\n\n# review functions\n\ndef AssignedUsers(role):\n assert role in ROLES\n return {u for u in USERS if (u,role) in UR}\n\ndef AssignedRoles(user):\n assert user in USERS\n return {r for r in ROLES if (user,r) in UR}\n\n# advanced review functions\n\ndef RolePermissions(role):\n assert role in ROLES\n return {(op,obj) for op in OPS for obj in OBJS \n if (op,obj,role) in PR}\n\ndef UserPermissions(user):\n assert user in USERS\n return {(op,obj) for r in ROLES\n for op in OPS for obj in OBJS\n if (user,r) in UR if (op,obj,r) in PR}\n\ndef SessionRoles(session):\n assert session in SESSIONS\n return {r for r in ROLES if (session,r) in SR}\n\ndef SessionPermissions(session):\n assert session in SESSIONS\n return {(op,obj) for r in ROLES\n for op in OPS for obj in OBJS\n if (session,r) in SR if (op,obj,r) in PR}\n\ndef RoleOperationsOnObject(role, object):\n assert role in ROLES \n assert object in OBJS\n return {op for op in OPS if (op,object,role) in PR}\n\ndef UserOperationsOnObject(user, object):\n assert user in USERS\n assert object in OBJS\n return {op for r in ROLES for op in OPS\n if (user,r) in UR if (op,object,r) in PR}\n" }, { "alpha_fraction": 0.6823529601097107, "alphanum_fraction": 0.6823529601097107, "avg_line_length": 13.166666984558105, "blob_id": "d23b0d0ef134bf7a6aae5153b90d7aa401739f98", "content_id": "dd731c05ee7de6330085e3decdad7b49fcf5e6b9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 85, "license_type": "no_license", "max_line_length": 27, "num_lines": 6, "path": "/incoq/compiler/aggr/__init__.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": 
"\"\"\"Aggregate operations.\"\"\"\n\n\n# Exports.\nfrom .aggr import *\nfrom .aggrcomp import *\n" }, { "alpha_fraction": 0.4507969617843628, "alphanum_fraction": 0.4636174738407135, "avg_line_length": 29.0625, "blob_id": "de49d6d8dd80f83305028428ebc2bd7c89f25fb0", "content_id": "0b46eec668f9d94467930aec2f00a708fc4fd529", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2886, "license_type": "no_license", "max_line_length": 75, "num_lines": 96, "path": "/incoq/tests/invinc/comp/test_order.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Unit tests for order.py.\"\"\"\n\n\nimport unittest\n\nimport incoq.compiler.incast as L\nfrom incoq.compiler.comp.clause import EnumClause, CondClause\nfrom incoq.compiler.comp.order import *\nState = AsymptoticOrderer.State\n\n\nclass TestOrder(unittest.TestCase):\n \n @classmethod\n def setUpClass(self):\n def ec(source):\n return EnumClause.from_expr(L.pe(source))\n def cc(source):\n return CondClause(L.pe(source))\n \n self.clauses = [\n ec('(a, b) in R'),\n ec('(a, c) in R'),\n ec('(b, c) in R'),\n cc('a != 5'),\n cc('d != 5'),\n ]\n \n self.bindenv = {'a', 'b'}\n \n def test_rate(self):\n rates = [cl.rate(self.bindenv) for cl in self.clauses]\n expected = [\n Rate.CONSTANT_MEMBERSHIP,\n Rate.NORMAL,\n Rate.NORMAL,\n Rate.CONSTANT,\n Rate.UNRUNNABLE,\n ]\n \n self.assertEqual(rates, expected)\n \n def test_step(self):\n def chosen_list(*args):\n return list(map(lambda i: (i, self.clauses[i]), args))\n def remaining_list(*args):\n return list(map(lambda i: (i, self.clauses[i]), args))\n \n state = State({'a', 'b'},\n chosen_list(0, 3),\n remaining_list(1, 2, 4),\n {})\n \n exp_state1 = State({'a', 'b', 'c'},\n chosen_list(0, 3, 1),\n remaining_list(2, 4),\n {})\n exp_state2 = State({'a', 'b', 'c'},\n chosen_list(0, 3, 2),\n remaining_list(1, 4),\n {})\n \n self.assertEqual(state.step(deterministic=False),\n [exp_state1, exp_state2])\n self.assertEqual(state.step(deterministic=True),\n [exp_state1])\n \n def test_get_orders(self):\n cl = self.clauses\n order = AsymptoticOrderer().get_order(enumerate(self.clauses[0:4]))\n \n exp_order = [\n (0, cl[0], set()),\n (3, cl[3], {'a', 'b'}),\n (1, cl[1], {'a', 'b'}),\n (2, cl[2], {'a', 'b', 'c'}),\n ]\n \n self.assertEqual(order, exp_order)\n \n def test_init_bounds(self):\n AsymptoticOrderer().get_order(enumerate(self.clauses),\n init_bounds=('d',))\n \n with self.assertRaises(AssertionError):\n AsymptoticOrderer().get_order(enumerate(self.clauses))\n \n def test_overrides(self):\n orderer = AsymptoticOrderer({'(d != 5)': -1})\n order = orderer.get_order(enumerate(self.clauses))\n \n self.assertEqual(order[0], (4, self.clauses[4], set()))\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.3819548785686493, "alphanum_fraction": 0.38308271765708923, "avg_line_length": 27.29787254333496, "blob_id": "d0fead506b94d8b72e450f4be78bcac21b0cb848", "content_id": "632e2b78ebe1164100e1bff9d31dbc5f55e686c0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2660, "license_type": "no_license", "max_line_length": 79, "num_lines": 94, "path": "/incoq/tests/invinc/set/test_setmatch.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "###############################################################################\n# test_setmatch.py #\n# Author: Jon Brandvein 
#\n###############################################################################\n\n\"\"\"Unit tests for the auxmap module.\"\"\"\n\n\nimport unittest\n\nimport incoq.compiler.incast as L\nfrom incoq.compiler.central import CentralCase\nfrom incoq.compiler.set.mask import Mask\nfrom incoq.compiler.set.setmatch import *\n\n\nclass TestSetmatchMacros(CentralCase):\n \n def test_bindmatch_bb(self):\n code = make_bindmatch('R', Mask('bb'), ['x', 'y'], [], L.pc('pass'))\n \n exp_code = L.pc('''\n if ((x, y) in R):\n pass\n ''')\n \n self.assertEqual(code, exp_code)\n \n def test_bindmatch_uu(self):\n code = make_bindmatch('R', Mask('uu'), [], ['x', 'y'], L.pc('pass'))\n \n exp_code = L.pc('''\n for (x, y) in R:\n pass\n ''')\n \n self.assertEqual(code, exp_code)\n \n def test_bindmatch_b1(self):\n code = make_bindmatch('R', Mask('b1'), ['x'], [], L.pc('pass'))\n \n exp_code = L.pc('''\n for _ in setmatch(R, 'b1', x):\n pass\n ''')\n \n self.assertEqual(code, exp_code)\n \n def test_bindmatch_other(self):\n code = make_bindmatch('R', Mask('bu'), ['x'], ['y'], L.pc('pass'))\n \n exp_code = L.pc('''\n for y in setmatch(R, 'bu', x):\n pass\n ''')\n \n self.assertEqual(code, exp_code)\n \n def test_tuplematch_bb(self):\n code = make_tuplematch(L.pe('v'), Mask('bb'),\n ['x', 'y'], [], L.pc('pass'))\n \n exp_code = L.pc('''\n if ((x, y) == v):\n pass\n ''')\n \n self.assertEqual(code, exp_code)\n \n def test_tuplematch_uu(self):\n code = make_tuplematch(L.pe('v'), Mask('uu'),\n [], ['x', 'y'], L.pc('pass'))\n \n exp_code = L.pc('''\n (x, y) = v\n pass\n ''')\n \n self.assertEqual(code, exp_code)\n \n def test_tuplematch_other(self):\n code = make_tuplematch(L.pe('v'), Mask('bu'),\n ['x'], ['y'], L.pc('pass'))\n \n exp_code = L.pc('''\n for y in setmatch({v}, 'bu', x):\n pass\n ''')\n \n self.assertEqual(code, exp_code)\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.38827839493751526, "alphanum_fraction": 0.4542124569416046, "avg_line_length": 18.5, "blob_id": "db46d99998f38b2638e7d97541ba47ba1e61378d", "content_id": "9676fe4baa1f6d90becd5ab3079f5308b4d8b2bf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 273, "license_type": "no_license", "max_line_length": 83, "num_lines": 14, "path": "/incoq/tests/programs/comp/nonpattern_in.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "# Test pattern conversion.\n\nfrom incoq.runtime import *\n\nE = Set()\nS = Set()\n\nfor v1, v2, z in {(1, 1, 'a'), (1, 2, 'b'), (1, 3, 'c'), (2, 3, 'd'), (3, 4, 'e')}:\n E.add((v1, v2, z))\n\nS.add(1)\n\nprint(sorted({x for (x, x2, c) in E if x2 in S\n if x == x2}))\n" }, { "alpha_fraction": 0.4162667393684387, "alphanum_fraction": 0.4713311493396759, "avg_line_length": 36.00934600830078, "blob_id": "cb1e61a172f6550d88a1c036c7123a7f31c3cbe9", "content_id": "85e428fde0aaf8c912bb090ee3c328bfcb558638", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3959, "license_type": "no_license", "max_line_length": 119, "num_lines": 107, "path": "/incoq/tests/programs/objcomp/inputrel_out.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from incoq.runtime import *\n# Comp1 := {(s, o_i) : (s, o) in _M, (o, o_i) in _F_i, o_i in N}\n_m_Comp1_out = Map()\ndef _maint__m_Comp1_out_add(_e):\n (v11_1, v11_2) = _e\n if (v11_1 not in _m_Comp1_out):\n _m_Comp1_out[v11_1] = set()\n _m_Comp1_out[v11_1].add(v11_2)\n\ndef _maint__m_Comp1_out_remove(_e):\n (v12_1, v12_2) = 
_e\n _m_Comp1_out[v12_1].remove(v12_2)\n if (len(_m_Comp1_out[v12_1]) == 0):\n del _m_Comp1_out[v12_1]\n\n_m__F_i_in = Map()\ndef _maint__m__F_i_in_add(_e):\n (v9_1, v9_2) = _e\n if (v9_2 not in _m__F_i_in):\n _m__F_i_in[v9_2] = set()\n _m__F_i_in[v9_2].add(v9_1)\n\n_m__M_in = Map()\ndef _maint__m__M_in_add(_e):\n (v7_1, v7_2) = _e\n if (v7_2 not in _m__M_in):\n _m__M_in[v7_2] = set()\n _m__M_in[v7_2].add(v7_1)\n\nComp1 = RCSet()\ndef _maint_Comp1__M_add(_e):\n # Iterate {(v1_s, v1_o, v1_o_i) : (v1_s, v1_o) in deltamatch(_M, 'bb', _e, 1), (v1_o, v1_o_i) in _F_i, v1_o_i in N}\n (v1_s, v1_o) = _e\n if hasattr(v1_o, 'i'):\n v1_o_i = v1_o.i\n if (v1_o_i in N):\n if ((v1_s, v1_o_i) not in Comp1):\n Comp1.add((v1_s, v1_o_i))\n # Begin maint _m_Comp1_out after \"Comp1.add((v1_s, v1_o_i))\"\n _maint__m_Comp1_out_add((v1_s, v1_o_i))\n # End maint _m_Comp1_out after \"Comp1.add((v1_s, v1_o_i))\"\n else:\n Comp1.incref((v1_s, v1_o_i))\n\ndef _maint_Comp1__F_i_add(_e):\n # Iterate {(v3_s, v3_o, v3_o_i) : (v3_s, v3_o) in _M, (v3_o, v3_o_i) in deltamatch(_F_i, 'bb', _e, 1), v3_o_i in N}\n (v3_o, v3_o_i) = _e\n if (v3_o_i in N):\n for v3_s in (_m__M_in[v3_o] if (v3_o in _m__M_in) else set()):\n if ((v3_s, v3_o_i) not in Comp1):\n Comp1.add((v3_s, v3_o_i))\n # Begin maint _m_Comp1_out after \"Comp1.add((v3_s, v3_o_i))\"\n _maint__m_Comp1_out_add((v3_s, v3_o_i))\n # End maint _m_Comp1_out after \"Comp1.add((v3_s, v3_o_i))\"\n else:\n Comp1.incref((v3_s, v3_o_i))\n\ndef _maint_Comp1_N_add(_e):\n # Iterate {(v5_s, v5_o, v5_o_i) : (v5_s, v5_o) in _M, (v5_o, v5_o_i) in _F_i, v5_o_i in deltamatch(N, 'b', _e, 1)}\n v5_o_i = _e\n for v5_o in (_m__F_i_in[v5_o_i] if (v5_o_i in _m__F_i_in) else set()):\n for v5_s in (_m__M_in[v5_o] if (v5_o in _m__M_in) else set()):\n if ((v5_s, v5_o_i) not in Comp1):\n Comp1.add((v5_s, v5_o_i))\n # Begin maint _m_Comp1_out after \"Comp1.add((v5_s, v5_o_i))\"\n _maint__m_Comp1_out_add((v5_s, v5_o_i))\n # End maint _m_Comp1_out after \"Comp1.add((v5_s, v5_o_i))\"\n else:\n Comp1.incref((v5_s, v5_o_i))\n\nN = Set()\nfor i in range(1, 5):\n N.add(i)\n # Begin maint Comp1 after \"N.add(i)\"\n _maint_Comp1_N_add(i)\n # End maint Comp1 after \"N.add(i)\"\ns1 = Set()\ns2 = Set()\nfor i in N:\n o = Obj()\n o.i = i\n # Begin maint _m__F_i_in after \"_F_i.add((o, i))\"\n _maint__m__F_i_in_add((o, i))\n # End maint _m__F_i_in after \"_F_i.add((o, i))\"\n # Begin maint Comp1 after \"_F_i.add((o, i))\"\n _maint_Comp1__F_i_add((o, i))\n # End maint Comp1 after \"_F_i.add((o, i))\"\n if (i % 2):\n s1.add(o)\n # Begin maint _m__M_in after \"_M.add((s1, o))\"\n _maint__m__M_in_add((s1, o))\n # End maint _m__M_in after \"_M.add((s1, o))\"\n # Begin maint Comp1 after \"_M.add((s1, o))\"\n _maint_Comp1__M_add((s1, o))\n # End maint Comp1 after \"_M.add((s1, o))\"\n else:\n s2.add(o)\n # Begin maint _m__M_in after \"_M.add((s2, o))\"\n _maint__m__M_in_add((s2, o))\n # End maint _m__M_in after \"_M.add((s2, o))\"\n # Begin maint Comp1 after \"_M.add((s2, o))\"\n _maint_Comp1__M_add((s2, o))\n # End maint Comp1 after \"_M.add((s2, o))\"\ns = s1\nprint(sorted((_m_Comp1_out[s] if (s in _m_Comp1_out) else set())))\ns = s2\nprint(sorted((_m_Comp1_out[s] if (s in _m_Comp1_out) else set())))" }, { "alpha_fraction": 0.51241534948349, "alphanum_fraction": 0.5598194003105164, "avg_line_length": 13.766666412353516, "blob_id": "0ff326cadd5df2360864ed54ef079a4612d8883b", "content_id": "7fb4f000b57cbbdfa34410b9224157ea6394f4e1", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "Python", 
"length_bytes": 443, "license_type": "no_license", "max_line_length": 60, "num_lines": 30, "path": "/experiments/cache/cachetest.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "import sys\nimport gc\nfrom time import process_time as pt\nfrom random import randrange\n\ngc.disable()\n\n\nn_ops = 100000\nn_trials = 10\nmax_x = 20000\n\n\nclass Obj:\n pass\n\n\ndef run(x, reps):\n objs = [Obj() for _ in range(max_x)]\n col = [(i, (objs[randrange(x)],)) for i in range(n_ops)]\n \n t1 = pt()\n for _ in range(reps):\n for (i, (o,)) in col:\n pass\n t2 = pt()\n return t2 - t1\n\n\nrun(int(sys.argv[1]), 500)\n" }, { "alpha_fraction": 0.4832802414894104, "alphanum_fraction": 0.4904458522796631, "avg_line_length": 30.399999618530273, "blob_id": "b7972384feb603d70d212d56ec4ab3612a490996", "content_id": "7b7cfa04940e91966c84d7f3470e7093ed057e75", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2512, "license_type": "no_license", "max_line_length": 71, "num_lines": 80, "path": "/incoq/tests/invinc/tup/test_tupclause.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Unit tests for tupclause.py\"\"\"\n\n\nimport unittest\n\nimport incoq.compiler.incast as L\nfrom incoq.compiler.comp import Rate\nfrom incoq.compiler.tup.tupclause import *\n\n\nclass TupClauseFactory(TupClauseFactory_Mixin):\n typecheck = True\n\nclass TupClauseFactory_NoTC(TupClauseFactory):\n typecheck = False\n\n\nclass TupClauseCase(unittest.TestCase):\n \n def test_tclause(self):\n cl = TClause('t', ['x', 'y'])\n \n # AST round-trip.\n clast = cl.to_AST()\n exp_clast = L.Enumerator(L.tuplify(('t', 'x', 'y'), lval=True),\n L.ln('_TUP2'))\n self.assertEqual(clast, exp_clast)\n cl2 = TClause.from_AST(exp_clast, TupClauseFactory)\n self.assertEqual(cl2, cl)\n \n # Attributes.\n self.assertEqual(cl.enumlhs, ('t', 'x', 'y'))\n self.assertEqual(cl.pat_mask, (True, True, True))\n self.assertEqual(cl.enumvars_tagsin, ('t',))\n self.assertEqual(cl.enumvars_tagsout, ('x', 'y'))\n \n self.assertCountEqual(cl.get_domain_constrs('_'),\n [('_t', ('<T>', '_t.1', '_t.2')),\n ('_t.1', '_x'),\n ('_t.2', '_y')])\n \n # Rate.\n rate = cl.rate([])\n self.assertEqual(rate, Rate.UNRUNNABLE)\n rate = cl.rate(['t'])\n self.assertEqual(rate, Rate.CONSTANT)\n \n # Code.\n code = cl.get_code(['t'], L.pc('pass'))\n exp_code = L.pc('''\n if isinstance(t, tuple) and len(t) == 2:\n for x, y in setmatch({(t, t[0], t[1])}, 'buu', t):\n pass\n ''')\n self.assertEqual(code, exp_code)\n \n # Code, no type-checks.\n cl = TClause_NoTC('t', ['x', 'y'])\n code = cl.get_code(['t'], L.pc('pass'))\n exp_code = L.pc('''\n for x, y in setmatch({(t, t[0], t[1])}, 'buu', t):\n pass\n ''')\n self.assertEqual(code, exp_code)\n \n def test_objclausefactory(self):\n cl = TClause('t', ['x', 'y'])\n clast = L.Enumerator(L.tuplify(['t', 'x', 'y'], lval=True),\n L.pe('_TUP2'))\n cl2 = TupClauseFactory.from_AST(clast)\n self.assertEqual(cl2, cl)\n \n cl = TClause_NoTC('t', ['x', 'y'])\n cl2 = TupClauseFactory_NoTC.from_AST(clast)\n self.assertEqual(cl2, cl)\n self.assertIsInstance(cl2, TClause_NoTC)\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.5017825365066528, "alphanum_fraction": 0.5160427689552307, "avg_line_length": 27.769229888916016, "blob_id": "851bacfd885ac46c9ce2783c07b85c0740d6ff24", "content_id": "1dab04264fb02552f0885d8c7ab3daae5ee68d8d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 
1122, "license_type": "no_license", "max_line_length": 62, "num_lines": 39, "path": "/incoq/tests/invinc/obj/test_pairrel.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Unit tests for pairrel.py.\"\"\"\n\n\nimport unittest\n\nimport incoq.compiler.incast as L\nfrom incoq.compiler.obj.pairrel import *\n\n\nclass PairrelCase(unittest.TestCase):\n \n def test_basic(self):\n self.assertTrue(is_mrel('_M'))\n self.assertTrue(is_frel('_F_f'))\n self.assertFalse(is_frel('R'))\n self.assertEqual(get_frel_field('_F_f'), 'f')\n self.assertEqual(make_frel('f'), '_F_f')\n self.assertTrue(is_maprel('_MAP'))\n \n def test_parseclause(self):\n comp = L.pe('''\n COMP({... for (a, b) in _M for (c, d) in _F_e\n for (f, g, h) in _MAP if x > 5}, [], {})\n ''')\n \n res1 = get_menum(comp.clauses[0])\n exp_res1 = (L.sn('a'), L.sn('b'))\n res2 = get_fenum(comp.clauses[1])\n exp_res2 = (L.sn('c'), L.sn('d'), 'e')\n res3 = get_mapenum(comp.clauses[2])\n exp_res3 = (L.sn('f'), L.sn('g'), L.sn('h'))\n \n self.assertEqual(res1, exp_res1)\n self.assertEqual(res2, exp_res2)\n self.assertEqual(res3, exp_res3)\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.5489311218261719, "alphanum_fraction": 0.5494061708450317, "avg_line_length": 35.736473083496094, "blob_id": "debcf8e37405792ee38a4231b4c9270e4147a450", "content_id": "01986465783b01aab17872a84cd714816abfe3d1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 21050, "license_type": "no_license", "max_line_length": 77, "num_lines": 573, "path": "/incoq/compiler/incast/typeeval.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Type analysis based on abstract interpretation.\"\"\"\n\n\n__all__ = [\n 'TypeAnalysisFailure',\n 'TypeAnalyzer',\n 'analyze_types',\n]\n\n\nfrom incoq.util.seq import pairs\n\nfrom .nodes import (Load, Store, Not, Eq, NotEq, Lt, LtE, Gt, GtE,\n Is, IsNot, In, NotIn, Enumerator, expr, Name,\n BitOr, BitXor, BitAnd)\nfrom .structconv import AdvNodeTransformer\nfrom .error import ProgramError\nfrom .util import VarsFinder\nfrom .types import *\n\n\nclass TypeAnalysisFailure(ProgramError):\n \n def __init__(self, msg=None, node=None, store=None):\n super().__init__(node=node)\n self.msg = msg\n self.node = node\n self.store = store\n \n def __str__(self):\n s = ''\n if self.msg is not None:\n s += str(self.msg)\n if self.store is not None:\n if self.msg is not None:\n s += '\\n'\n s += 'Store: ' + str(self.store)\n return s\n\n\nclass TypeAnalyzer(AdvNodeTransformer):\n \n \"\"\"Abstract interpreter for type analysis.\n \n The flow of type information mirrors the dataflow of the concrete\n interpretation. Nodes exchange data (types) with their children\n via visit() calls. Sending data to a child is done by passing an\n extra input argument to visit(). 
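For instance, visit_Assign\n    passes the assigned value's type down into each assignment target. 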
Receiving data is done by visit()\n returning a non-None value.\n \n Expression nodes have different behavior depending on whether they\n are in Load or Store context.\n \n There is a separate store mapping variables to their type\n information.\n \n Expression node type fields are updated as they are passed\n up or down the tree.\n \"\"\"\n \n def __init__(self, store=None):\n super().__init__()\n if store is None:\n store = {}\n self.store = store\n \n def top_helper(self, node, input=None):\n # Result: top\n node = self.generic_visit(node)\n return node._replace(type=toptype)\n \n # Statements.\n \n def visit_Delete(self, node):\n # Don't bother descending into children.\n pass\n \n def visit_Assign(self, node):\n # Action: target <- value for each target\n value = self.visit(node.value)\n targets = [self.visit(t, value.type) for t in node.targets]\n return node._replace(value=value, targets=targets)\n \n def visit_For(self, node):\n # Trigger: iter = set<T> for some T\n # Action: target <- T\n iter = self.visit(node.iter)\n if isinstance(iter.type, SetType):\n target = self.visit(node.target, iter.type.et)\n else:\n target = node.target\n body = self.visit(node.body)\n orelse = self.visit(node.orelse)\n return node._replace(iter=iter, target=target,\n body=body, orelse=orelse)\n \n # Expressions.\n \n def visit_BoolOp(self, node):\n # Result: bool\n # Cond: v <= bool for each value v\n values = [self.visit(v) for v in node.values]\n if not all(v.type.issubtype(booltype) for v in values):\n raise TypeAnalysisFailure('BoolOp has non-bool arg',\n node, self.store)\n return node._replace(values=values, type=booltype)\n \n def visit_BinOp(self, node):\n # If op is BitOr | BitXor | BitAnd:\n # Result: join(left, right)\n # Otherwise:\n # Result: number\n # Cond: left <= number, right <= number\n left = self.visit(node.left)\n right = self.visit(node.right)\n if isinstance(node.op, (BitOr, BitXor, BitAnd)):\n resulttype = left.type.join(right.type)\n return node._replace(left=left, right=right, type=resulttype)\n else:\n if not (left.type.issubtype(numbertype) and\n right.type.issubtype(numbertype)):\n raise TypeAnalysisFailure('BinOp has non-number arg',\n node, self.store)\n return node._replace(left=left, right=right, type=numbertype)\n \n def visit_UnaryOp(self, node):\n # If op is \"not\":\n # Result: bool\n # Cond: operand <= bool\n # Otherwise:\n # Result: number\n # Cond: operand <= number\n operand = self.visit(node.operand)\n resulttype = (booltype if isinstance(node.op, Not)\n else numbertype)\n t = resulttype.join(operand.type)\n if not t.issubtype(resulttype):\n raise TypeAnalysisFailure('UnaryOp requires ' + str(resulttype),\n node, self.store)\n return node._replace(operand=operand, type=t)\n \n visit_Lambda = top_helper\n \n def visit_IfExp(self, node):\n # Result: join(body, orelse)\n # Cond: test <= bool\n test = self.visit(node.test)\n body = self.visit(node.body)\n orelse = self.visit(node.orelse)\n t = body.type.join(orelse.type)\n if not test.type.issubtype(booltype):\n raise TypeAnalysisFailure('IfExp requires bool condition',\n node, self.store)\n return node._replace(test=test, body=body, orelse=orelse, type=t)\n \n def visit_Dict(self, node):\n # Result: dict<join(keys), join(values)>\n keys = [self.visit(k) for k in node.keys]\n values = [self.visit(v) for v in node.values]\n t = DictType(bottomtype.join(*[k.type for k in keys]),\n bottomtype.join(*[v.type for v in values]))\n return node._replace(keys=keys, value=values, type=t)\n \n def visit_Set(self, node):\n # 
Result: set<join(elts)>\n elts = [self.visit(e) for e in node.elts]\n t = SetType(bottomtype.join(*[e.type for e in elts]))\n return node._replace(elts=elts, type=t)\n \n def visit_ListComp(self, node):\n # Result: list<elt>\n generators = [self.visit(gen) for gen in node.generators]\n elt = self.visit(node.elt)\n t = ListType(elt.type)\n return node._replace(generators=generators, elt=elt, type=t)\n \n def visit_SetComp(self, node):\n # Result: set<elt>\n generators = [self.visit(gen) for gen in node.generators]\n elt = self.visit(node.elt)\n t = SetType(elt.type)\n return node._replace(generators=generators, elt=elt, type=t)\n \n def visit_DictComp(self, node):\n # Result: dict<key, value>\n generators = [self.visit(gen) for gen in node.generators]\n key = self.visit(node.key)\n value = self.visit(node.value)\n t = DictType(key.type, value.type)\n return node._replace(generators=generators,\n key=key, value=value, type=t)\n \n visit_GeneratorExp = top_helper\n visit_Yield = top_helper\n visit_YieldFrom = top_helper\n \n def compare_helper(self, node, op, t_left, t_right):\n # If op is ordering:\n # Cond: left and right are bottom, numbertype, or any tuple\n # If op is equality/identity or their negations:\n # No action\n # If op is membership or its negation:\n # Cond: right <= set<top> or right <= dict<top, top>\n def allowed(t):\n return (t.issubtype(booltype) or\n t.issubtype(numbertype) or\n t.issubtype(strtype) or\n isinstance(t, TupleType) or\n isinstance(t, ListType) or\n isinstance(t, SetType) or\n isinstance(t, DictType))\n \n if isinstance(op, (Lt, LtE, Gt, GtE)):\n if not (allowed(t_left) and allowed(t_right)):\n raise TypeAnalysisFailure('Order comparison requires '\n 'number or tuple operands',\n node, self.store)\n elif isinstance(op, (Eq, NotEq, Is, IsNot)):\n # We don't check to see that the two operands are\n # of the same type. Even though this is hopefully\n # true in the final analysis results, it's not\n # necessarily true at an arbitrary intermediate\n # stage. We don't propagate type information against\n # the dataflow, so there's nothing to do.\n pass\n elif isinstance(op, (In, NotIn)):\n # Same issue as above. We don't compare the types against\n # each other, but at least we can say the RHS must be a set.\n if not (t_right.issubtype(SetType(toptype)) or\n t_right.issubtype(DictType(toptype, toptype))):\n raise TypeAnalysisFailure('Membership comparison requires '\n 'set on right-hand side',\n node, self.store)\n \n def visit_Compare(self, node):\n # Result: bool\n # Cond: compare_helper(left, op, right) for each pair\n values = (node.left,) + node.comparators\n values = [self.visit(v) for v in values]\n for (left, right), op in zip(pairs(values), node.ops):\n self.compare_helper(node, op, left.type, right.type)\n return node._replace(left=values[0], comparators=values[1:],\n type=booltype)\n \n def visit_Call(self, node):\n ### TODO\n # ???\n node = self.generic_visit(node)\n return node._replace(type=bottomtype)\n \n def visit_Num(self, node):\n # Result: number\n return node._replace(type=numbertype)\n \n def visit_Str(self, node):\n # Result: str\n return node._replace(type=strtype)\n \n visit_Bytes = top_helper\n \n def visit_NameConstant(self, node):\n # If True/False:\n # Result: bool\n # If None:\n # Result: bottom\n if node.value in [True, False]:\n t = booltype\n else:\n # Arguably, None should be toptype since it is\n # definitively a value that doesn't fit into\n # other classifications. 
But this messes with\n # global variable initialization in some of the\n # distalgo examples (SELF_ID = None at the header\n # makes SELF_ID be considered to have top type).\n #\n # The tradeoff is that expressions like 5 + None\n # won't be caught as type errors.\n t = bottomtype\n return node._replace(type=t)\n \n visit_Ellipsis = top_helper\n \n # Context-dependent expressions.\n #\n # These nodes may appear in Load or Store context. These cases\n # are handled separately, since the dataflow goes in opposite\n # directions. Del context is ignored.\n \n def visit_Attribute(self, node, input=None):\n ### TODO\n return node._replace(type=bottomtype)\n \n def visit_Subscript(self, node, input=None):\n ### TODO\n return node._replace(type=bottomtype)\n \n visit_Starred = top_helper\n \n def visit_Name(self, node, input=None):\n # In load context:\n # Result: store[id]\n # In store context:\n # Action: store[id] <- input\n if isinstance(node.ctx, Load):\n assert input is None\n t = self.store[node.id]\n return node._replace(type=t)\n elif isinstance(node.ctx, Store):\n assert input is not None\n self.store[node.id] = t = self.store[node.id].join(input)\n return node._replace(type=t)\n \n def visit_List(self, node, input=None):\n # In load context:\n # Result: list<join(elts)>\n # In store context:\n # Cond: input = list<T> for some T\n # Action: elt_i <- T for each i\n if isinstance(node.ctx, Load):\n assert input is None\n elts = [self.visit(e) for e in node.elts]\n t = ListType(bottomtype.join(*[e.type for e in elts]))\n return node._replace(elts=elts, type=t)\n elif isinstance(node.ctx, Store):\n assert input is not None\n if not isinstance(input, ListType):\n raise TypeAnalysisFailure('Store to List requires list type',\n node, self.store)\n elts = [self.visit(e, input.et) for e in node.elts]\n return node._replace(elts=elts, type=input)\n \n def visit_Tuple(self, node, input=None):\n # In load context:\n # Result: tuple<elt_1, ..., elt_n>\n # In store context:\n # Trigger: input = tuple<...> with len(elts) parts\n # Action: elt_i <- input_i for each i\n if isinstance(node.ctx, Load):\n assert input is None\n elts = [self.visit(e) for e in node.elts]\n t = TupleType([e.type for e in elts])\n return node._replace(elts=elts, type=t)\n elif isinstance(node.ctx, Store):\n assert input is not None\n if (isinstance(input, TupleType) and\n len(input.ets) == len(node.elts)):\n elts = [self.visit(e, i)\n for e, i in zip(node.elts, input.ets)]\n return node._replace(elts=elts, type=input)\n \n # Other nodes.\n \n def visit_comprehension(self, node):\n # Trigger: iter = set<T> for some T\n # Action: target <- T\n # Cond: cond = bool for each cond\n iter = self.visit(node.iter)\n if isinstance(iter.type, SetType):\n target = self.visit(node.target, iter.type.et)\n else:\n target = node.target\n ifs = [self.visit(i) for i in node.ifs]\n for cond in ifs:\n if not cond.type.issubtype(booltype):\n raise TypeAnalysisFailure('Condition clause requires '\n 'bool type', node, self.store)\n return node._replace(iter=iter, target=target, ifs=ifs)\n \n # IncAST nodes.\n \n def visit_Enumerator(self, node):\n # Trigger: iter = set<T> for some T\n # Action: target <- T\n iter = self.visit(node.iter)\n if isinstance(iter.type, SetType):\n target = self.visit(node.target, iter.type.et)\n else:\n target = node.target\n return node._replace(iter=iter, target=target)\n \n def visit_Comp(self, node):\n # Result: set<elt>\n clauses = []\n for cl in node.clauses:\n if isinstance(cl, Enumerator):\n cl = self.visit(cl)\n 
else:\n cl = self.visit(cl)\n if not cl.type.issubtype(booltype):\n raise TypeAnalysisFailure('Condition clause requires '\n 'bool type', node, self.store)\n clauses.append(cl)\n resexp = self.visit(node.resexp)\n t = SetType(resexp.type)\n return node._replace(resexp=resexp, clauses=tuple(clauses), type=t)\n \n def visit_Aggregate(self, node):\n ### TODO\n node = self.generic_visit(node)\n return node._replace(type=bottomtype)\n \n def visit_SetUpdate(self, node):\n # Cond: target <= set<top>\n target = self.visit(node.target)\n if not target.type.issubtype(SetType(toptype)):\n raise TypeAnalysisFailure('SetUpdate requires set type',\n node, self.store)\n elem = self.visit(node.elem)\n \n # Hack: If target is a Name, update it and its store\n # variable to be at least set<target>.\n if isinstance(target, Name):\n t_target = target.type.join(SetType(elem.type))\n target = target._replace(type=t_target)\n self.store[target.id] = t_target\n \n return node._replace(target=target, elem=elem)\n \n def visit_MacroUpdate(self, node):\n # For set updates:\n # Cond: target <= set<top>\n # Cond: other <= set<top> if other is present\n # For map updates:\n # Cond: target <= dict<top, top>\n # Cond: other <= dict<top, top> if other is present\n if node.op in ['union', 'inter', 'diff', 'symdiff',\n 'assign', 'clear']:\n t_oper = SetType(toptype)\n t_oper_name = 'set'\n elif node.op in ['mapassign', 'mapclear']:\n t_oper = DictType(toptype, toptype)\n t_oper_name = 'dict'\n else:\n assert()\n failure = TypeAnalysisFailure('{} update requires {} type'.format(\n node.op, t_oper_name),\n node, self.store)\n target = self.visit(node.target)\n if not target.type.issubtype(t_oper):\n raise failure\n if node.other is not None:\n other = self.visit(node.other)\n if not other.type.issubtype(t_oper):\n raise failure\n else:\n other = None\n return node._replace(target=target, other=other)\n \n visit_RCSetRefUpdate = visit_SetUpdate\n \n def visit_IsEmpty(self, node):\n # Result: bool\n # Cond: target <= set<top>\n target = self.visit(node.target)\n if not target.type.issubtype(SetType(toptype)):\n raise TypeAnalysisFailure('IsEmpty requires set type',\n node, self.store)\n return node._replace(target=target, type=booltype)\n \n def visit_GetRef(self, node):\n # Result: numbertype\n # Cond: target <= set<top>\n target = self.visit(node.target)\n if not target.type.issubtype(SetType(toptype)):\n raise TypeAnalysisFailure('IsEmpty requires set type',\n node, self.store)\n elem = self.visit(node.elem)\n return node._replace(target=target, elem=elem, type=numbertype)\n \n def lookup_helper(self, node, check_default=False):\n # Cond: target = dict<K, V> for some K, V\n # Cond: key <= K\n # Result: V\n # Cond: default <= V if a default is applicable and provided\n target = self.visit(node.target)\n key = self.visit(node.key)\n if not isinstance(target.type, DictType):\n raise TypeAnalysisFailure('Lookup requires dict type',\n node, self.store)\n if not key.type.issubtype(target.type.kt):\n raise TypeAnalysisFailure('Lookup key does not fit dict '\n 'key type', node, self.store)\n t = target.type.vt\n if check_default:\n if node.default is not None:\n default = self.visit(node.default)\n if not default.type.issubtype(target.type.vt):\n raise TypeAnalysisFailure('Lookup default does not fit '\n 'dict value type',\n node, self.store)\n else:\n default = None\n node = node._replace(target=target, key=key,\n default=default, type=t)\n else:\n node = node._replace(target=target, key=key, type=t)\n return node\n \n def 
visit_Lookup(self, node):\n return self.lookup_helper(node, check_default=True)\n \n def visit_ImgLookup(self, node):\n return self.lookup_helper(node)\n \n visit_RCImgLookup = visit_ImgLookup\n \n # Temporary stuff.\n \n def visit_SMLookup(self, node):\n ### TODO\n node = self.generic_visit(node)\n return node._replace(type=bottomtype)\n \n def visit_DemQuery(self, node):\n ### TODO\n node = self.generic_visit(node)\n return node._replace(type=bottomtype)\n \n def visit_NoDemQuery(self, node):\n ### TODO\n node = self.generic_visit(node)\n return node._replace(type=bottomtype)\n \n def visit_SetMatch(self, node):\n ### TODO\n node = self.generic_visit(node)\n return node._replace(type=bottomtype)\n \n def visit_DeltaMatch(self, node):\n ### TODO\n node = self.generic_visit(node)\n return node._replace(type=bottomtype)\n\n\n#class TypeAnnotator(NodeTransformer):\n# \n# \"\"\"Transformer that fills in type information for expressions\n# based on a store.\n# \"\"\"\n# \n# def visit_\n\n\ndef analyze_types(tree, vartypes=None):\n if vartypes is None:\n vartypes = {}\n \n varnames = VarsFinder.run(tree)\n \n store = {var: vartypes.get(var, bottomtype)\n for var in varnames}\n \n def widen(store):\n for n, t in store.items():\n store[n] = t.widen(10)\n \n count = 0\n limit = 10\n oldtree = None\n while count < limit:\n if tree == oldtree:\n break\n oldtree = tree\n tree = TypeAnalyzer.run(tree, store)\n widen(store)\n count += 1\n else:\n print('Type analysis cut off after {} iterations'.format(count))\n \n# for k, v in store.items():\n# print(' {} -- {}'.format(k, v))\n \n return tree, store\n" }, { "alpha_fraction": 0.635208010673523, "alphanum_fraction": 0.6363636255264282, "avg_line_length": 23.037036895751953, "blob_id": "1afcb4943b316d4ade8465a64805442eb9f05d8c", "content_id": "c9c9cd81fd6a23fa724afc541797f1c5976f54fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2596, "license_type": "no_license", "max_line_length": 72, "num_lines": 108, "path": "/incoq/compiler/obj/pairrel.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Special relations used for object-domain translation.\"\"\"\n\n\n__all__ = [\n 'MREL_NAME',\n 'FREL_PREFIX',\n 'MAPREL_NAME',\n 'make_mrel',\n 'make_frel',\n 'make_maprel',\n 'is_mrel',\n 'is_frel',\n 'is_maprel',\n 'is_specialrel',\n 'get_frel_field',\n \n 'get_menum',\n 'is_menum',\n 'get_fenum',\n 'is_menum',\n 'get_mapenum',\n 'is_mapenum',\n]\n\n\nimport incoq.compiler.incast as L\n\n\nMREL_NAME = '_M'\nFREL_PREFIX = '_F_'\nMAPREL_NAME = '_MAP'\n\n\ndef make_mrel():\n return MREL_NAME\n\ndef make_frel(field):\n return FREL_PREFIX + field\n\ndef make_maprel():\n return MAPREL_NAME\n\ndef is_mrel(rel):\n return rel == MREL_NAME\n\ndef is_frel(rel):\n return rel.startswith(FREL_PREFIX)\n\ndef is_maprel(rel):\n return rel == MAPREL_NAME\n\ndef is_specialrel(rel):\n return is_mrel(rel) or is_frel(rel) or is_maprel(rel)\n\ndef get_frel_field(rel):\n assert rel.startswith(FREL_PREFIX)\n return rel[len(FREL_PREFIX):]\n\n\n# The following functions take in a comprehension clause (possibly a\n# condition clause), and return the parsed information if it has\n# the right form, or None if it does not. 
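For example, get_menum\n# returns the pair of target elements for a clause over _M, and returns\n# None for a clause over an ordinary relation. 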
It is an error if the clause\n# is an enumerator over a special relation but doesn't have the correct\n# arity on its left-hand side.\n\ndef get_menum(cl):\n \"\"\"Parse a membership clause, returning the set and element\n components.\n \"\"\"\n if not (isinstance(cl, L.Enumerator) and\n isinstance(cl.iter, L.Name) and\n is_mrel(cl.iter.id)):\n return None\n assert isinstance(cl.target, L.Tuple) and len(cl.target.elts) == 2\n return cl.target.elts\n\ndef is_menum(cl):\n return get_menum(cl) is not None\n\ndef get_fenum(cl):\n \"\"\"Parse a field clause, returning a triple of the object component,\n value component, and field name.\n \"\"\"\n if not (isinstance(cl, L.Enumerator) and\n isinstance(cl.iter, L.Name) and\n is_frel(cl.iter.id)):\n return None\n assert isinstance(cl.target, L.Tuple) and len(cl.target.elts) == 2\n obj, value = cl.target.elts\n field = get_frel_field(cl.iter.id)\n return obj, value, field\n\ndef is_fenum(cl):\n return get_fenum(cl) is not None\n\ndef get_mapenum(cl):\n \"\"\"Parse a map clause, returning a triple of the map, key, and\n value components.\n \"\"\"\n if not (isinstance(cl, L.Enumerator) and\n isinstance(cl.iter, L.Name) and\n is_maprel(cl.iter.id)):\n return None\n assert isinstance(cl.target, L.Tuple) and len(cl.target.elts) == 3\n return cl.target.elts\n\ndef is_mapenum(cl):\n return get_mapenum(cl) is not None\n" }, { "alpha_fraction": 0.49850064516067505, "alphanum_fraction": 0.5030701160430908, "avg_line_length": 27.35222625732422, "blob_id": "c6c980e157ec647f16c5b7d9e19b315a51773926", "content_id": "c30a6ed6bb11e3c8f971fa7dc4b17fbb8ab5ae93", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7003, "license_type": "no_license", "max_line_length": 79, "num_lines": 247, "path": "/experiments/rbac/constrainedrbac/run_crbac_exp.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Run the Constrained RBAC experiment.\"\"\"\n\n\nfrom random import sample, shuffle\nfrom itertools import product\nimport os\nimport sys\nimport importlib\n\nfrom frexp import ExpWorkflow, Datagen, MetricExtractor, Runner\n\nfrom experiments.util import SmallExtractor\n\n\nclass CRBACDatagen(Datagen):\n \n \"\"\"Create users, roles, and constraints. 
Alternately grant a\n permission to a user and check the condition.\n \n (Note: The integrity condition will be violated, but we're just\n treating it as a query for this experiment.)\n \n Parameters:\n nU, nR, nC -- number of users, roles, constraints\n sC -- number of roles in each constraint\n limit -- number of allowed roles from each constraint set\n until condition is considered violated\n \"\"\"\n \n def generate(self, P):\n nU, nR, nC = P['nU'], P['nR'], P['nC']\n sC = P['sC']\n limit = P['limit']\n \n UR = list(product(range(nU), range(nR)))\n shuffle(UR)\n \n SSDNR = [(i, j) for i in range(nC)\n for j in sample(range(nR), sC)]\n \n return dict(\n dsparams = P,\n UR = UR,\n SSDNR = SSDNR,\n )\n\n\nclass CRBACDriver:\n \n check_interval = 10\n timeout = 60\n \n def __init__(self, pipe_filename):\n import gc\n import pickle\n \n gc.disable()\n \n with open(pipe_filename, 'rb') as pf:\n dataset, prog, other_tparams = pickle.load(pf)\n os.remove(pipe_filename)\n \n \n self.dataset = dataset\n self.prog = prog\n self.module = None\n self.results = {}\n \n self.setUp()\n \n from frexp.util import StopWatch, user_time\n from time import process_time, perf_counter\n timer_user = StopWatch(user_time)\n timer_cpu = StopWatch(process_time)\n timer_wall = StopWatch(perf_counter)\n \n # Make available to run for timeouts.\n self.timer_cpu = timer_cpu\n \n with timer_user, timer_cpu, timer_wall:\n finished = self.run()\n \n if finished:\n import incoq.runtime\n self.results['size'] = incoq.runtime.get_total_structure_size(\n self.module.__dict__)\n self.results['time_user'] = timer_user.consume()\n self.results['time_cpu'] = timer_cpu.consume()\n self.results['time_wall'] = timer_wall.consume()\n \n self.results['stdmetric'] = self.results['time_cpu']\n else:\n self.results['timedout'] = True\n \n \n with open(pipe_filename, 'wb') as pf:\n pickle.dump(self.results, pf)\n \n def setUp(self):\n # Import driven program.\n dirname, filename = os.path.split(self.prog)\n if dirname:\n sys.path.append(dirname)\n try:\n self.module = importlib.import_module(\n 'experiments.rbac.constrainedrbac.' 
+ filename)\n finally:\n if dirname:\n sys.path.pop()\n \n \n m = self.module\n ds = self.dataset\n P = ds['dsparams']\n \n # Initialize dataset.\n for i in range(P['nU']):\n m.add_user('u' + str(i))\n for i in range(P['nC']):\n m.add_ssdnc('c' + str(i), P['limit'])\n for i, j in ds['SSDNR']:\n m.add_ssdnr('c' + str(i), 'r' + str(j))\n m.do_query()\n \n # Preprocess operations.\n self.OPS = []\n for i, j in ds['UR']:\n self.OPS.append(('u' + str(i), 'r' + str(j)))\n \n def run(self):\n add_ur = self.module.add_ur\n do_query = self.module.do_query_nodemand\n \n check_interval = self.check_interval\n timer_cpu = self.timer_cpu\n timeout = self.timeout\n \n for i, (u, r) in enumerate(self.OPS):\n # Check timeout every so often.\n if i % check_interval == 0:\n if timer_cpu.elapsed > timeout:\n return False\n \n add_ur(u, r)\n do_query()\n \n return True\n\n\nclass CRBACRunner(Runner):\n \n def run_all_tests(self, tparams_list):\n # Hack to skip trials for a prog after there's been a timeout.\n blacklist = set()\n \n datapoint_list = []\n for i, trial in enumerate(tparams_list, 1):\n prog = trial['prog']\n if prog in blacklist:\n self.print('Skipping test ' + str(i))\n continue\n \n itemstr = 'Running test {} of {} ...'.format(i, len(tparams_list))\n self.print(itemstr, end='')\n \n datapoints, timedout = self.repeat_single_test(trial, len(itemstr))\n if timedout:\n blacklist.add(prog)\n datapoint_list.extend(datapoints)\n \n return datapoint_list\n\n\nclass CRBACWorkflow(ExpWorkflow):\n \n ExpDatagen = CRBACDatagen\n ExpDriver = CRBACDriver\n ExpRunner = CRBACRunner\n# verifier\n \n require_ac = False ###\n\n\nclass CRBACScale(CRBACWorkflow):\n \n \"\"\"Scale up the number of users and constraints.\n \n Expectation: Linear for Inc, because for each update, we find a\n constant number of constraints that have the role, increment their\n counts (the inner query), and update the outer query in constant\n time. Cubic for Aux, because of three linear factors: number of\n users and constraints (both from outer query), and number of\n query operations (= # users). 
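For example, at x = 100 the\n    data generator produces 100 users and 100 constraints of 5 roles\n    each, and the driver performs 500 user-role grants, re-running the\n    query after each one.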
\n \"\"\"\n \n prefix = 'results/crbac'\n \n class ExpDatagen(CRBACWorkflow.ExpDatagen):\n \n progs = [\n 'crbac_orig',\n 'crbac_aux',\n# 'crbac_inc',\n 'crbac_dem',\n ]\n \n def get_dsparams_list(self):\n return [\n dict(\n dsid = str(x),\n x = x,\n \n nU = x,\n nR = 5,\n nC = x,\n sC = 5,\n limit = 3,\n )\n for x in list(range(2, 20, 2)) + list(range(20, 100 + 1, 5))\n ]\n \n stddev_window = .1\n min_repeats = 10\n max_repeats = 50\n \n class ExpExtractor(MetricExtractor, SmallExtractor):\n \n series = [\n ('crbac_orig', 'original',\n 'red', '- s poly5'),\n ('crbac_aux', 'auxiliary maps',\n 'orange', '-- s poly3'),\n ('crbac_inc', 'unfiltered',\n 'blue', '- o poly2'),\n ('crbac_dem', 'filtered',\n 'green', '- ^ poly2'),\n ]\n \n legend_loc = 'upper center'\n \n ylabel = 'Running time (in seconds)'\n xlabel = 'Number of users and constraints'\n \n metric = 'time_cpu'\n \n ymin = 0\n ymax = 10\n" }, { "alpha_fraction": 0.4593175947666168, "alphanum_fraction": 0.5144357085227966, "avg_line_length": 20.16666603088379, "blob_id": "0acfc2a8ad400eb42f23ac716984e50b5d122163", "content_id": "eb92fad7567be946f273dc391539f32aec95f4ce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 381, "license_type": "no_license", "max_line_length": 74, "num_lines": 18, "path": "/incoq/tests/programs/comp/uset/uset_in.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "# Incrementalized comprehensions with U-set.\n\nfrom incoq.runtime import *\n\nE = Set()\n\nfor v1, v2 in {(1, 2), (2, 3), (2, 4), (4, 5)}:\n E.add((v1, v2))\n\nx = 1\n\nQUERYOPTIONS(\n '{z for (x2, y) in E for (y2, z) in E if x == x2 if y == y2}',\n params = ['x'],\n uset_mode = 'all',\n impl = 'inc',\n)\nprint(sorted({z for (x2, y) in E for (y2, z) in E if x == x2 if y == y2}))\n" }, { "alpha_fraction": 0.5601357817649841, "alphanum_fraction": 0.5614088177680969, "avg_line_length": 33.72842025756836, "blob_id": "d3fc4150205466a4930b14d36c3dbb2986bada52", "content_id": "f90878eb6a9d04f4d6dc620b54df1dd1ce54e4a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16496, "license_type": "no_license", "max_line_length": 77, "num_lines": 475, "path": "/incoq/compiler/demand/tags.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Tag-based demand transformation strategy.\"\"\"\n\n\n__all__ = [\n 'make_structures',\n 'prune_structures',\n 'get_used_filters',\n 'structures_to_comps',\n 'uset_to_comp',\n 'filter_comps',\n]\n\n\nfrom types import SimpleNamespace\nfrom operator import attrgetter\n\nfrom simplestruct import Struct, Field, TypedField\n\nfrom incoq.compiler.set import Mask\nfrom incoq.compiler.comp import (Clause, EnumClause, inst_wildcards,\n Join, CompSpec)\nimport incoq.compiler.incast as L\n\nfrom .demclause import DemClause\n\n\nKIND_TAG = 'TAG'\nKIND_FILTER = 'FILTER'\nKIND_USET = 'USET'\n\n\nclass Tag(Struct):\n kind = KIND_TAG\n \n i = TypedField(int)\n \"\"\"Index of query enumerator for which tag is introduced.\"\"\"\n name = TypedField(str)\n \"\"\"Name of tag set.\"\"\"\n var = TypedField(str)\n \"\"\"Query variable being tagged.\"\"\"\n lhs = TypedField(str, seq=True)\n \"\"\"LHS of the query enumerator.\"\"\"\n rel = TypedField(str)\n \"\"\"Relation being projected. 
This is either the original relation\n iterated by the query enumerator, or else a filter over it.\n \"\"\"\n reorder_i = TypedField(int)\n \"\"\"Relative order for demand graph.\"\"\"\n\nclass Filter(Struct):\n kind = KIND_FILTER\n \n i = TypedField(int)\n \"\"\"Index of query enumerator for which filter is introduced.\"\"\"\n name = TypedField(str)\n \"\"\"Name of filter set.\"\"\"\n lhs = TypedField(str, seq=True)\n \"\"\"LHS of the query enumerator.\"\"\"\n rel = TypedField(str)\n \"\"\"RHS of the query enumerator, i.e., set being filtered.\"\"\"\n preds = TypedField(str, seq=True)\n \"\"\"Names of predecessor tags.\"\"\"\n reorder_i = TypedField(int)\n \"\"\"Relative order for demand graph.\"\"\"\n\nclass USet(Struct):\n kind = KIND_USET\n \n i = TypedField(int)\n \"\"\"Index of query enumerator where the subquery is iterated over.\"\"\"\n name = TypedField(str)\n \"\"\"Name of associated demand name.\"\"\"\n vars = TypedField(str, seq=True)\n \"\"\"Vars that get passed to the demand functions as parameters.\"\"\"\n preds = Field()\n \"\"\"Names of predecessor tags, or None if using clauses.\"\"\"\n pred_clauses = Field()\n \"\"\"Predecessor clauses, or None if using tags.\"\"\"\n reorder_i = TypedField(int)\n \"\"\"Relative order for demand graph.\"\"\"\n\n\nclass DemStructures(Struct):\n _immutable = False\n \n tags = Field()\n filters = Field()\n usets = Field()\n \n @property\n def structs(self):\n \"\"\"All tags, filters, and usets, returned in dependency order.\"\"\"\n # Tags must come after the filters at their own enumerator.\n # Relies on stability of list.sort.\n structs = self.filters + self.usets + self.tags\n structs.sort(key=attrgetter('reorder_i'))\n return structs\n\n\ndef make_structures_create(clauses, *, reorder=None):\n \"\"\"Return a DemStructures object with partially-constructed\n tag/filter/uset values. 
They are missing values for the name,\n rel, and preds/pred_clauses fields.\n \"\"\"\n # Use SimpleNamespace, to allow partially constructed state.\n SN_Tag = SN_Filter = SN_USet = SimpleNamespace\n \n # Store relative order for demand graph.\n if reorder is None:\n reorder = [i for i in range(len(clauses))]\n assert (len(reorder) == len(clauses) and\n set(reorder) == set(range(len(clauses))))\n \n # Initialize structures, leaving some fields blank.\n \n # For each enumerator, make a tag for the enumvars that appear\n # in a taggable position.\n tags = [SN_Tag(kind=KIND_TAG,\n i=i,\n name=None,\n var=v,\n lhs=e.enumlhs,\n rel=None,\n reorder_i=reorder.index(i))\n for i, e in enumerate(clauses)\n if e.kind is e.KIND_ENUM\n for v in e.enumvars_tagsout]\n \n # Make a filter for each enumerator that is not over a\n # demand-driven subquery.\n filters = [SN_Filter(kind=KIND_FILTER,\n i=i,\n name=None,\n lhs=e.enumlhs,\n rel=e.enumrel,\n preds=None,\n reorder_i=reorder.index(i))\n for i, e in enumerate(clauses)\n if e.kind is e.KIND_ENUM\n if not e.has_demand]\n \n # Make a uset for each enumerator over a demand-driven subquery.\n usets = [SN_USet(kind=KIND_USET,\n i=i,\n name=e.demname,\n vars=e.demparams,\n preds=None,\n pred_clauses=None,\n reorder_i=reorder.index(i))\n for i, e in enumerate(clauses)\n if e.kind is e.KIND_ENUM\n if e.has_demand]\n \n return DemStructures(tags=tags, filters=filters, usets=usets)\n\ndef make_structures_name(qname, ds):\n \"\"\"Fill in name information, in-place.\"\"\"\n tags = ds.tags\n filters = ds.filters\n \n # The number gets excluded if the name is unambiguous.\n \n for i, t in enumerate(tags):\n others = [t2 for t2 in tags if t2.var == t.var]\n prev_others = [t2 for t2 in tags[:i] if t2.var == t.var]\n numstr = (str(len(prev_others) + 1)\n if len(others) > 1 else '')\n t.name = '{}_T{}{}'.format(qname, t.var, numstr)\n \n for i, f in enumerate(filters):\n others = [f2 for f2 in filters if f2.rel == f.rel]\n prev_others = [f2 for f2 in filters[:i] if f2.rel == f.rel]\n numstr = (str(len(prev_others) + 1)\n if len(others) > 1 else '')\n f.name = '{}_d{}{}'.format(qname, f.rel, numstr)\n\ndef make_structures_preds(clauses, ds, *,\n singletag, subdem_tags):\n \"\"\"Fill in predecessor information, in-place.\"\"\"\n # If subdem_tags is True, we use the same predecessor-finding\n # logic for both filters and usets. Otherwise, we handle usets\n # separately below.\n if subdem_tags:\n needs_predtags = ds.filters + ds.usets\n else:\n needs_predtags = ds.filters\n \n for s in needs_predtags:\n e = clauses[s.i]\n s.preds = []\n for v in e.enumvars_tagsin:\n # Use tags introduced to the left of here.\n preds = [t.name for t in ds.tags\n if t.var == v\n if t.reorder_i < s.reorder_i]\n # When using singletag, only choose the first\n # (leftmost) tag. The others will be pruned.\n if singletag:\n preds = preds[:1]\n s.preds.extend(preds)\n s.preds = tuple(s.preds)\n \n if not subdem_tags:\n for s in ds.usets:\n # Use the join of all clauses to the left of here.\n # Strip DEMQUERY from those clauses.\n preds = tuple(cl.cl if isinstance(cl, DemClause) else cl\n for cl in clauses[:s.i])\n s.pred_clauses = preds\n\ndef make_structures_finish(clauses, ds, *, subdem_tags):\n \"\"\"Get rid of filters that don't filter anything. 
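(A filter with no\n    predecessor tags would not restrict the iterated relation at all.) 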
Make tags\n refer to the right relation.\n \"\"\"\n tags = ds.tags\n filters = ds.filters\n usets = ds.usets\n \n # Remove unnecessary filters.\n ds.filters = filters = [f for f in filters if len(f.preds) > 0]\n \n # Make sure appropriate u-set preds exist for each parameter\n # tracked in that u-set.\n if subdem_tags:\n tags_by_name = {t.name: t for t in tags}\n for u in usets:\n for v in u.vars:\n if not any(tags_by_name[p].var == v for p in u.preds):\n raise AssertionError(\n 'Outer comprehension does not have a tag to '\n 'constrain subquery parameter \"{}\" '\n '(clauses: {})'.format(\n v, ', '.join(str(c) for c in clauses)))\n else:\n for u in usets:\n for v in u.vars:\n if not any(v in cl.enumvars for cl in u.pred_clauses):\n raise AssertionError(\n 'Outer comprehension does not have a clause '\n 'to constraint subquery parameter \"{}\" '\n '(clauses: {})'.format(\n v, ', '.join(str(c) for c in clauses)))\n \n # We could at this point get rid of tags that are not predecessors\n # to any filter, but we will have more information about what we\n # can remove after we know the join orders of the maintenance\n # comprehensions.\n \n # Set up tags to refer to filters, or the original iterated\n # relation if the filter was removed or it's a uset.\n for t in tags:\n for f in filters:\n if t.i == f.i:\n t.rel = f.name\n break\n else:\n t.rel = clauses[t.i].enumrel\n\ndef make_structures(clauses, qname, *, singletag, subdem_tags, reorder=None):\n \"\"\"Generate tag/filter/uset structures for the given clauses\n and query name. Return the structures in dependency order.\n \"\"\"\n ds = make_structures_create(clauses, reorder=reorder)\n make_structures_name(qname, ds)\n make_structures_preds(clauses, ds,\n singletag=singletag,\n subdem_tags=subdem_tags)\n make_structures_finish(clauses, ds,\n subdem_tags=subdem_tags)\n \n # Convert from SimpleNamespace to Struct objects.\n # If we screwed up, here's where we'll get a type error.\n ds.tags = [Tag(t.i, t.name, t.var, t.lhs, t.rel, t.reorder_i)\n for t in ds.tags]\n ds.filters = [Filter(f.i, f.name, f.lhs, f.rel, f.preds, f.reorder_i)\n for f in ds.filters]\n ds.usets = [USet(u.i, u.name, u.vars, u.preds,\n u.pred_clauses, u.reorder_i)\n for u in ds.usets]\n \n return ds\n\n\ndef prune_structures(ds, used_indices, *, subdem_tags):\n \"\"\"Remove tags and filters that are not needed, given a sequence\n of indices of query enumerators for which filters are needed.\n Modify ds in-place.\n \"\"\"\n tags = ds.tags\n filters = ds.filters\n usets = ds.usets\n \n # Index from tag name to tag object.\n tags_by_name = None\n # Index from tag object to list of filters and usets\n # who have this tag as a predecessor.\n inv_preds = None\n def recompute():\n nonlocal tags_by_name, inv_preds\n \n tags_by_name = {t.name: t for t in tags}\n \n if subdem_tags:\n uses_predtags = filters + usets\n else:\n uses_predtags = filters\n \n inv_preds = {t: [] for t in tags}\n for s in uses_predtags:\n for tname in s.preds:\n t = tags_by_name[tname]\n inv_preds[t].append(s)\n \n # Remove filters that are both unrequested and do not define\n # useful tags. Remove tags that are not predecessors to useful\n # filters or to usets. 
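Removals cascade: deleting a filter can\n    # leave its predecessor tags unused, and deleting a tag can make\n    # the filter it reads from unnecessary. 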
Repeat until fixed point.\n changed = True\n while changed:\n changed = False\n recompute()\n \n for t in list(tags):\n if len(inv_preds[t]) == 0:\n tags.remove(t)\n changed = True\n \n for f in list(filters):\n if not (f.i in used_indices or\n any(t.rel == f.name for t in tags)):\n filters.remove(f)\n changed = True\n\n\n### Below this point has not been refactored.\n\ndef get_used_filters(ds, ordering, use_tag_checks):\n \"\"\"Take in the demand structures for a query, and an ordering of\n clauses for one of the query's maintenance comps. Return a set of\n the clause indices for which filter relations are used.\n \"\"\"\n # Ignore conditions.\n ordering = [(i, cl, bindenv) for i, cl, bindenv in ordering\n if cl.kind is Clause.KIND_ENUM]\n \n filters = ds.filters\n filters_by_index = {f.i: f for f in filters}\n \n used_indices = set()\n for i, cl, bindenv in ordering:\n if cl.kind is Clause.KIND_COND:\n continue\n if i not in filters_by_index:\n continue\n \n # Singleton clauses are filtered if we're doing tag checks.\n deltamask = Mask.from_vars(cl.enumlhs, cl.enumlhs)\n if cl.isdelta and (use_tag_checks or deltamask.has_wildcards):\n used_indices.add(i)\n \n # Consult clause rules.\n elif cl.needs_filtering(bindenv):\n used_indices.add(i)\n \n return used_indices\n\n\ndef structures_to_comps(ds, factory):\n \"\"\"Convert tags and filters to comprehensions that define them.\n Return pairs of comps and their names, in dependency order.\n Ignore usets.\n \"\"\"\n tags_by_name = {t.name: t for t in ds.tags}\n result = []\n \n for s in ds.structs:\n if s.kind is KIND_TAG:\n cl = EnumClause(s.lhs, s.rel)\n spec = CompSpec(Join([cl], factory, None), L.ln(s.var), [])\n elif s.kind is KIND_FILTER:\n cls = []\n for tname in s.preds:\n t = tags_by_name[tname]\n cls.append(EnumClause([t.var], t.name))\n # Be sure to replace wildcards with fresh vars.\n lhs = inst_wildcards(s.lhs)\n cls.append(EnumClause(lhs, s.rel))\n spec = CompSpec(Join(cls, factory, None), L.tuplify(lhs), [])\n elif s.kind is KIND_USET:\n continue\n else:\n assert()\n \n result.append((s.name, spec.to_comp({})))\n \n return result\n\ndef uset_to_comp(ds, uset, factory, first_clause):\n \"\"\"Convert a uset to a comprehension.\"\"\"\n subdem_tags = uset.preds is not None\n \n if subdem_tags:\n tags_by_name = {t.name: t for t in ds.tags}\n clauses = []\n for tname in uset.preds:\n t = tags_by_name[tname]\n clauses.append(EnumClause([t.var], t.name))\n else:\n clauses = uset.pred_clauses\n \n # As an odd special case, if there are no preds,\n # use an emptiness test on the first enumerator,\n # which should be a U-set.\n if len(clauses) == 0:\n assert first_clause.kind is Clause.KIND_ENUM\n assert uset.i != 0, 'Can\\'t make demand invariant for inner ' \\\n 'query; it is the first clause'\n cl = EnumClause(tuple('_' for _ in first_clause.enumlhs),\n first_clause.enumrel)\n clauses.append(cl)\n \n spec = CompSpec(Join(clauses, factory, None), L.tuplify(uset.vars), [])\n return spec.to_comp({})\n\n\ndef filter_comps(tree, factory, ds, comps, use_tag_checks, *,\n augmented, subdem_tags):\n \"\"\"Transform maintenance comps to use filters. Return the\n modified tree and the structs that are actually needed by\n one or more of the comps.\n \n If augmented is True, uses of filters for the delta relation\n are modified to subtract the delta element. 
\n \"\"\"\n filters = ds.filters\n index_to_filtername = {f.i: f.name for f in filters}\n \n used_indices = set()\n for comp in comps:\n spec = CompSpec.from_comp(comp, factory)\n join = spec.join\n \n ordering = join.get_ordering(spec.params)\n \n new_used_indices = get_used_filters(ds, ordering, use_tag_checks)\n \n new_clauses = []\n for i, cl in enumerate(join.clauses):\n if i in new_used_indices:\n if cl.isdelta:\n new_cl = factory.rewrite_rel(cl, index_to_filtername[i])\n new_clauses.append(new_cl)\n if use_tag_checks:\n check_cl = EnumClause(cl.lhs, index_to_filtername[i])\n new_clauses.append(check_cl)\n else:\n new_cl = factory.rewrite_rel(\n cl, index_to_filtername[i])\n if augmented:\n if cl.enumrel == join.delta.rel:\n new_cl = factory.subtract_inner(\n new_cl, join.delta.elem)\n new_clauses.append(new_cl)\n else:\n new_clauses.append(cl)\n new_join = join._replace(clauses=new_clauses)\n \n new_spec = spec._replace(join=new_join)\n new_comp = new_spec.to_comp(comp.options)\n \n tree = L.QueryReplacer.run(tree, comp, new_comp)\n \n used_indices |= new_used_indices\n \n prune_structures(ds, used_indices, subdem_tags=subdem_tags)\n return tree, ds\n" }, { "alpha_fraction": 0.7023208737373352, "alphanum_fraction": 0.7114025950431824, "avg_line_length": 27.314285278320312, "blob_id": "1355cf00ab50fd108f7f1700c6d72b8531ba9a84", "content_id": "b2176725ef86ae7051ba78fbead4f995f48f0886", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 991, "license_type": "no_license", "max_line_length": 126, "num_lines": 35, "path": "/env.sh", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "#!/bin/sh\n\n# Adjust PYTHONPATH in the current shell to include IncOQ and dependencies.\n# Invoke as `source env.sh` or `source env.sh -w`.\n#\n# The -w flag is for Cygwin environments which use a non-Cygwin\n# (native Windows) Python interpreter. 
It causes the path entries\n# to be added in Windows format.\n\n\ngetopts w WINPATH\n# Since we're invoked as a source script, clean up after ourselves\n# or we can't run twice.\nOPTIND=1\nOPTARG=\n\n# Courtesy http://stackoverflow.com/questions/4774054/reliable-way-for-a-bash-script-to-get-the-full-path-to-itself\nINCOQ_DIR=$(cd `dirname \"${BASH_SOURCE[0]}\"` && pwd)\nDEPS_DIR=`dirname $INCOQ_DIR`/deps\n\nNEWENTRIES=\"$INCOQ_DIR:$DEPS_DIR/simplestruct:$DEPS_DIR/iast:$DEPS_DIR/frexp:$DEPS_DIR/distalgo:$DEPS_DIR/gendb:$DEPS_DIR/osq\"\n\nif [ $WINPATH = \"w\" ]; then\n NEWENTRIES=`cygpath -wp $NEWENTRIES`\n SEP=';'\nelse\n SEP=':'\nfi\n\nif [ -z $PYTHONPATH ]; then\n SEP=''\nfi\n\nexport PYTHONPATH=\"${PYTHONPATH}${SEP}${NEWENTRIES}\"\necho \"Updated PYTHONPATH.\"\n" }, { "alpha_fraction": 0.5728248357772827, "alphanum_fraction": 0.5760827660560608, "avg_line_length": 31.14373779296875, "blob_id": "3edd3a3954d4e2e6875947aad8b5f25e3c66e971", "content_id": "2991be5286aaa5843d7630ff5fe6d0d6de123284", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15654, "license_type": "no_license", "max_line_length": 79, "num_lines": 487, "path": "/experiments/rbac/corerbac/rbac_helper.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "###############################################################################\n# rbac_helper.py #\n# Author: Jon Brandvein #\n###############################################################################\n\n\"\"\"Utility that assists with generating rbac operations.\n\nThere are two main contributions: A logger that combines the execution\nof RBAC operations with the appending of their call information to a\nlist; and an emitter that provides no-argument methods for generating\nrandom RBAC operations.\n\nIronically, there are some performance issues with running the generator\nto produce large sequences of operations. It looks like the generator\nitself could benefit from a partially incrementalized RBAC\nimplementation.\n\"\"\"\n\n\nimport random\nfrom itertools import count\nfrom time import clock\nfrom collections import Counter\nfrom operator import itemgetter\n\nfrom .coreRBAC import CoreRBAC\n\n\ncorerbac_update_names = [\n 'AddUser',\n 'DeleteUser',\n 'AddRole',\n 'DeleteRole',\n 'AssignUser',\n 'DeassignUser',\n 'AddOperation', # These two are not in the ANSI standard.\n 'AddObject', #\n 'GrantPermission',\n 'RevokePermission',\n 'CreateSession',\n 'DeleteSession',\n 'AddActiveRole',\n 'DropActiveRole',\n]\ncorerbac_query_names = [\n 'CheckAccess',\n 'AssignedUsers',\n 'AssignedRoles',\n 'RolePermissions',\n 'UserPermissions',\n 'SessionRoles',\n 'SessionPermissions',\n 'RoleOperationsOnObject',\n 'UserOperationsOnObject',\n]\ncorerbac_op_names = corerbac_update_names + corerbac_query_names\n\ndef choice(seq):\n \"\"\"Like random.choice(), but accept sets, and raise ValueError\n if seq is empty.\n \"\"\"\n if len(seq) == 0:\n raise ValueError\n if isinstance(seq, set):\n seq = list(seq)\n return random.choice(seq)\n\ndef strcounter(prefix):\n \"\"\"Yield strings \"p0\", \"p1\", \"p2\", ..., where p is the given prefix.\"\"\"\n for i in count():\n yield prefix + str(i)\n\n\nclass LoggingCoreRBAC(CoreRBAC):\n \n \"\"\"A version of CoreRBAC that adds, for each query or update\n operation OP, a method log_OP that calls OP and appends the\n call info to self.log. 
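For example (a sketch,\n    assuming the AddUser/AddRole/AssignUser updates inherited from\n    CoreRBAC):\n    \n        rbac = LoggingCoreRBAC()\n        rbac.log_AddUser('u0')\n        rbac.log_AddRole('r0')\n        rbac.log_AssignUser('u0', 'r0')\n        # rbac.log == [('AddUser', 'u0'), ('AddRole', 'r0'),\n        # ('AssignUser', 'u0', 'r0')]\n    \n    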
The log is a list of tuples (OP name, *args).\n \"\"\"\n \n # The log_* methods are auto-generated as wrapper functions\n # after this class definition.\n \n def __init__(self):\n super().__init__() # Init internal rbac state.\n self.log = []\n\ndef make_logger(name):\n \"\"\"Given an operation name, return a logger method that\n appends (name, *args) to self.log and then calls the operation\n method by that name.\n \"\"\"\n def logger(self, *args):\n self.log.append((name,) + args)\n f = getattr(self, name)\n return f(*args)\n \n logger.__name__ = 'log_' + name\n return logger\n\nfor n in corerbac_op_names:\n setattr(LoggingCoreRBAC, 'log_' + n, make_logger(n))\n\n\nclass EmittingCoreRBAC(LoggingCoreRBAC):\n \n \"\"\"Introduces a set of emit_* helper methods for generating\n operations with random valid arguments. If an emit_* method\n succeeds in finding such argument values, it logs the call\n and returns True; otherwise it returns False without actually\n mutating any state.\n \n For debugging and instrumentation purposes, the number of\n attempts, failures, and time taken for each kind of operation\n is tracked.\n \"\"\"\n \n def __init__(self):\n super().__init__()\n \n # Counters for generating fresh domain values.\n self.n_users = strcounter('u')\n self.n_roles = strcounter('r')\n self.n_ops = strcounter('op')\n self.n_objs = strcounter('obj')\n self.n_sessions = strcounter('s')\n \n self.gentimes = Counter()\n self.attempts = Counter()\n self.failures = Counter()\n \n def report(self):\n \"\"\"Format the attempts, failures, and gentime info for display.\"\"\"\n s = ''\n \n s += 'Time:\\n'\n total = sum(self.gentimes.values())\n for name, t in sorted(self.gentimes.items(), key=itemgetter(0)):\n s += ' {:<20} {:<8.3f} {:>2.0f}%\\n'.format(\n name + ':', t, t/total * 100)\n s += '-' * 40 + '\\n'\n s += ' {:<20} {:.3f}\\n\\n'.format('Total:', total)\n \n s += 'Failure rate:\\n'\n total_fail = sum(self.failures.values())\n total_att = sum(self.attempts.values())\n for name, f in sorted(self.failures.items(), key=itemgetter(0)):\n a = self.attempts[name]\n s += ' {:<20} {:<10} {:>2.0f}%\\n'.format(\n name + ':', str(f) + '/' + str(a), f/a * 100)\n s += '-' * 40 + '\\n'\n s += ' {:<20} {:<10} {:>2.0f}%\\n'.format(\n 'Total:',\n str(total_fail) + '/' + str(total_att),\n total_fail/total_att * 100)\n \n return s\n \n # Emitters are wrappers around gen_* methods. Each gen_*\n # returns valid args for its associated operation, or raises\n # ValueError. 
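For instance, gen_AddUser below always\n    # succeeds by drawing a fresh user name, while gen_DeassignUser\n    # raises ValueError when UR is empty. 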
The emit_* methods are defined programmatically,\n # after this class definition.\n \n def gen_AddUser(self):\n return (next(self.n_users),)\n \n def gen_DeleteUser(self):\n return (choice(self.USERS),)\n \n def gen_AddRole(self):\n return (next(self.n_roles),)\n \n def gen_DeleteRole(self):\n return (choice(self.ROLES),)\n \n def gen_AssignUser(self):\n dom = {(u, r) for u in self.USERS for r in self.ROLES}\n dom -= self.UR\n return choice(dom)\n \n def gen_DeassignUser(self):\n return choice(self.UR)\n \n def gen_AddOperation(self):\n return (next(self.n_ops),)\n \n def gen_AddObject(self):\n return (next(self.n_objs),)\n \n def gen_GrantPermission(self):\n dom = {((op, obj), r) for op in self.OPS for obj in self.OBJS\n for r in self.ROLES}\n dom -= self.PR\n ((op, obj), role) = choice(dom)\n return (op, obj, role)\n \n def gen_RevokePermission(self):\n ((op, obj), role) = choice(self.PR)\n return (op, obj, role)\n \n def gen_CreateSession(self):\n u = choice(self.USERS)\n s = next(self.n_sessions)\n roles = self.AssignedRoles(u)\n ars = random.sample(roles, random.randrange(len(roles) + 1))\n # Use frozenset to play nice with hashed containers.\n ars = frozenset(ars)\n return (u, s, ars)\n \n def gen_DeleteSession(self):\n session, user = choice(self.SU)\n return (user, session)\n \n def gen_AddActiveRole(self):\n dom = {(u, s, r) for (s, u) in self.SU\n for r in self.ROLES if (u, r) in self.UR\n if (s, r) not in self.SR}\n return choice(dom)\n \n def gen_DropActiveRole(self):\n (s, r) = choice(self.SR)\n u = next(u for u in self.USERS if (s, u) in self.SU)\n return (u, s, r)\n \n def gen_CheckAccess(self):\n return choice(self.SESSIONS), choice(self.OPS), choice(self.OBJS)\n \n def gen_AssignedUsers(self):\n return (choice(self.ROLES),)\n \n def gen_AssignedRoles(self):\n return (choice(self.USERS),)\n \n def gen_RolePermissions(self):\n return (choice(self.ROLES),)\n \n def gen_UserPermissions(self):\n return (choice(self.USERS),)\n \n def gen_SessionRoles(self):\n return (choice(self.SESSIONS),)\n \n def gen_SessionPermissions(self):\n return (choice(self.SESSIONS),)\n \n def gen_RoleOperationsOnObject(self):\n return (choice(self.ROLES), choice(self.OBJS))\n \n def gen_UserOperationsOnObject(self):\n return (choice(self.USERS), choice(self.OBJS))\n\ndef make_emitter(name):\n \"\"\"Given an operation name, return an emitter method that\n uses the corresponding gen_* to produce arguments (if possible),\n and then calls the corresponding log_* method; and returns a\n bool indicating whether it succeeded.\n \"\"\"\n def emitter(self):\n f_gen = getattr(self, 'gen_' + name)\n f_log = getattr(self, 'log_' + name)\n \n self.attempts[name] += 1\n \n try:\n t1 = clock()\n args = f_gen()\n t2 = clock()\n self.gentimes[name] += t2 - t1\n assert isinstance(args, tuple)\n \n except ValueError:\n self.failures[name] += 1\n return False\n \n f_log(*args)\n return True\n \n emitter.__name__ = 'emit_' + n\n return emitter\n\nfor n in corerbac_op_names:\n setattr(EmittingCoreRBAC, 'emit_' + n, make_emitter(n))\n\n\nclass DemandEmittingCoreRBAC(EmittingCoreRBAC):\n \n \"\"\"Refinement of the above operation generator that also tracks a\n special subset of queryable sessions. The queryable sessions\n cannot be removed by updates (DeleteSession, or indirectly via\n something like DeleteUser). 
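For example, with n_queryable = 2 the\n    protected sessions are 's0' and 's1', and gen_DeleteSession never\n    selects either of them. 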
CheckAccess calls will only be emitted\n for queryable sessions, not other sessions.\n \"\"\"\n \n def __init__(self, n_queryable):\n super().__init__()\n # If there are n queryable sessions, they are named s0 up to\n # s(n-1).\n self.queryable = {'s' + str(i) for i in range(n_queryable)}\n \n # We redefine the appropriate gen_* methods here. No need to\n # programmatically recreate the emit_* methods, since we were\n # careful to define those in a way that doesn't couple them with\n # the particular gen_* implementations in EmittingCoreRBAC.\n \n def gen_DeleteUser(self):\n # Get all users who do *not* have a queryable session.\n dom = [u for u in self.USERS\n if all((s, u) not in self.SU\n for s in self.queryable)]\n return (choice(dom),)\n \n def gen_DeleteRole(self):\n # Get all roles who do *not* have a queryable session.\n dom = [r for r in self.ROLES\n if all((s, r) not in self.SR\n for s in self.queryable)]\n return (choice(dom),)\n \n def gen_DeassignUser(self):\n # Get all user/role pairs where there is no queryable session\n # for that user with that role activated.\n dom = [(u, r) for (u, r) in self.UR\n if all(not ((s, u) in self.SU and (s, r) in self.SR)\n for s in self.queryable)]\n return choice(dom)\n \n def gen_DeleteSession(self):\n # Only choose non-queryable sessions.\n dom = [(s, u) for (s, u) in self.SU\n if s not in self.queryable]\n session, user = choice(dom)\n return (user, session)\n \n def gen_CheckAccess(self):\n # Only choose queryable sessions.\n dom = list(self.queryable & self.SESSIONS)\n return choice(dom), choice(self.OPS), choice(self.OBJS)\n\n\nclass FastEmittingCoreRBAC(EmittingCoreRBAC):\n \n \"\"\"Faster RBAC emitter that is allowed to give up trying to\n generate an operation even when one exists. This may bias\n the generation away from operations for which a random\n assignment of parameters is less likely to be valid.\n \n Specifically, this emitter does not construct expensive\n Cartesian products for things like choosing an element from\n the complement of a relation. 
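For example, gen_AssignUser below draws\n    one random (user, role) pair and raises ValueError if the pair is\n    already in UR, instead of enumerating the complement. 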
As a consequence, some operations\n now only take linear time in the size of the RBAC data structures.\n \"\"\"\n \n def gen_AssignUser(self):\n tup = (choice(self.USERS), choice(self.ROLES))\n if tup in self.UR:\n raise ValueError\n return tup\n \n def gen_GrantPermission(self):\n op = choice(self.OPS)\n obj = choice(self.OBJS)\n r = choice(self.ROLES)\n if ((op, obj), r) in self.PR:\n raise ValueError\n return (op, obj, r)\n \n def gen_AddActiveRole(self):\n (s, u) = choice(self.SU)\n r = choice({r for (u2, r) in self.UR if u2 == u})\n if (s, r) in self.SR:\n raise ValueError\n return (u, s, r)\n\nclass FastDemandEmittingCoreRBAC(DemandEmittingCoreRBAC, FastEmittingCoreRBAC):\n \n \"\"\"Extension of the demand-aware data generator to also be fast.\"\"\"\n \n def gen_DeleteUser(self):\n u = choice(self.USERS)\n if any((s, u) in self.SU\n for s in self.queryable):\n raise ValueError\n return (u,)\n \n def gen_DeleteRole(self):\n r = choice(self.ROLES)\n if any((s, r) in self.SR\n for s in self.queryable):\n raise ValueError\n return (r,)\n \n def gen_DeassignUser(self):\n (u, r) = choice(self.UR)\n if any((s, u) in self.SU and (s, r) in self.SR\n for s in self.queryable):\n raise ValueError\n return (u, r)\n\n\nclass SREmittingCoreRBAC(FastDemandEmittingCoreRBAC):\n \n \"\"\"Emitter that's optimized for AddActiveRole/DropActiveRole.\"\"\"\n \n # This implementation is derived by (manually) optimizing\n # computations used by FastEmittingCoreRBAC.AddActiveRole()\n # and EmittingCoreRBAC.\n #\n # Specifically, we cache UR.out, SU.out, and list(SU), which\n # do not change at all during sequences of updates to sessions'\n # active roles. We also incrementally compute SR.out, and\n # allow DropActiveRole() to probabilistically return a false\n # negative.\n \n # Some of the mappings may leak memory due to obsolete keys\n # (similar to defaultdict), but that shouldn't be a problem\n # for us.\n \n def __init__(self, *args):\n super().__init__(*args)\n \n # None means \"requires recomputation\".\n self.URout = None\n self.SUout = None\n self.SUlist = None\n self.SRout = {}\n \n # Updates instrumented with maintenance code.\n \n def AssignUser(self, user, role):\n super().AssignUser(user, role)\n self.URout = None\n \n def DeassignUser(self, user, role):\n super().DeassignUser(user, role)\n self.URout = None\n \n def CreateSession(self, user, session, ars):\n super().CreateSession(user, session, ars)\n self.SUout = None\n self.SUlist = None\n self.SRout.setdefault(session, set()).update(ars)\n \n def DeleteSession(self, user, session):\n super().DeleteSession(user, session)\n self.SUout = None\n self.SUlist = None\n del self.SRout[session]\n \n def AddActiveRole(self, user, session, role):\n super().AddActiveRole(user, session, role)\n self.SRout.setdefault(session, set()).add(role)\n \n def DropActiveRole(self, user, session, role):\n super().DropActiveRole(user, session, role)\n self.SRout[session].remove(role)\n \n # Optimized gen_* functions.\n \n def gen_AddActiveRole(self):\n if self.URout is None:\n self.URout = {}\n for (u, r) in self.UR:\n self.URout.setdefault(u, []).append(r)\n if self.SUlist is None:\n self.SUlist = list(self.SU)\n \n (s, u) = choice(self.SUlist)\n r = choice(self.URout[u])\n if (s, r) in self.SR:\n raise ValueError\n return (u, s, r)\n \n def gen_DropActiveRole(self):\n if self.SUout is None:\n self.SUout = {}\n for (s, u) in self.SU:\n self.SUout[s] = u\n \n s = choice(self.SESSIONS)\n if s not in self.SRout:\n # The present-but-empty case is caught by choice() 
below.\n raise ValueError\n r = choice(self.SRout[s])\n \n u = self.SUout[s]\n \n return (u, s, r)\n" }, { "alpha_fraction": 0.4960784316062927, "alphanum_fraction": 0.5011204481124878, "avg_line_length": 31.16216278076172, "blob_id": "e324219383d27013915ec174a1a3633c9964824c", "content_id": "e90290c9a8254ac7fe9030c8ab83b44938479420", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3570, "license_type": "no_license", "max_line_length": 76, "num_lines": 111, "path": "/incoq/tests/invinc/incast/test_nodeconv.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Unit tests for nodeconv.py.\"\"\"\n\n\nimport unittest\n\nfrom incoq.compiler.incast.nodes import *\nfrom incoq.compiler.incast.structconv import parse_structast\nfrom incoq.compiler.incast.nodeconv import *\nfrom incoq.compiler.incast.nodeconv import value_to_ast\n\n\nclass NodeconvCase(unittest.TestCase):\n \n def p(self, source, mode=None):\n return parse_structast(source, mode=mode)\n \n def pc(self, source):\n return self.p(source, mode='code')\n \n def ps(self, source):\n return self.p(source, mode='stmt')\n \n def pe(self, source):\n return self.p(source, mode='expr')\n \n def test_valuetoast(self):\n val = {1: 'a', 2: [{3: False}, ({4}, 5, None)]}\n tree = value_to_ast(val)\n exp_tree = self.pe('{1: \"a\", 2: [{3: False}, ({4}, 5, None)]}')\n self.assertEqual(tree, exp_tree)\n \n def test_options_rewriter(self):\n tree = self.p('''\n incoq.runtime.OPTIONS(a='b')\n incoq.runtime.QUERYOPTIONS('foo', a='b')\n ''')\n tree = OptionsRewriter.run(tree)\n exp_tree = self.p('''\n OPTIONS(a='b')\n QUERYOPTIONS('foo', a='b')\n ''')\n self.assertEqual(tree, exp_tree)\n \n def test_import_SetUpdate(self):\n tree = self.ps('S.add(x)')\n tree = IncLangImporter.run(tree)\n exp_tree = SetUpdate(Name('S', Load()), 'add', Name('x', Load()))\n self.assertEqual(tree, exp_tree)\n \n def test_import_OPTIONS(self):\n tree = self.ps('OPTIONS(a = \"b\")')\n tree = IncLangImporter.run(tree)\n exp_tree = NOptions({'a': 'b'})\n self.assertEqual(tree, exp_tree)\n \n def test_import_MAINT(self):\n tree = self.pc('''\n with MAINT(Q, 'after', 'S.add(x)'):\n S.add(x)\n pass\n ''')\n tree = IncLangImporter.run(tree)\n update_node = SetUpdate(Name('S', Load()), 'add', Name('x', Load()))\n exp_tree = (Maintenance('Q', 'S.add(x)',\n (), (update_node,), (Pass(),)),)\n self.assertEqual(tree, exp_tree)\n \n def test_import_COMP(self):\n tree = self.pe('COMP({x for x in S}, [S], {\"a\": \"b\"})')\n tree = IncLangImporter.run(tree)\n exp_tree = Comp(\n Name('x', Load()),\n (Enumerator(Name('x', Store()), Name('S', Load())),),\n ('S',), {'a': 'b'})\n self.assertEqual(tree, exp_tree)\n \n # Make sure omitting params/options works.\n \n tree1 = self.pe('COMP({x for x in S})')\n tree1 = IncLangImporter.run(tree1)\n tree2 = self.pe('COMP({x for x in S}, None, None)')\n tree2 = IncLangImporter.run(tree2)\n exp_tree = Comp(\n Name('x', Load()),\n (Enumerator(Name('x', Store()), Name('S', Load())),),\n None, None)\n self.assertEqual(tree1, exp_tree)\n self.assertEqual(tree2, exp_tree)\n \n def test_export(self):\n orig_tree = self.p('''\n OPTIONS(u = 'v')\n S.add(x)\n setmatch(R, 'bu', x)\n COMP({x for x in S if x in T}, [S], {'a': 'b'})\n sum(R)\n ''')\n tree = IncLangImporter.run(orig_tree)\n tree = IncLangExporter.run(tree)\n exp_tree = self.p('''\n OPTIONS(...)\n S.add(x)\n setmatch(R, 'bu', x)\n COMP({x for x in S if x in T}, [S], {'a': 'b'})\n sum(R, None)\n ''')\n self.assertEqual(tree, 
exp_tree)\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.5241976380348206, "alphanum_fraction": 0.5274240374565125, "avg_line_length": 34.05356979370117, "blob_id": "c4b9e4b5c69c381c9f1f2f858d19cf9ce9595879", "content_id": "b75c041559068d8d887416f200716ab33e37ece3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5889, "license_type": "no_license", "max_line_length": 73, "num_lines": 168, "path": "/incoq/compiler/comp/order.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Clause ordering algorithm for computing joins.\"\"\"\n\n\n__all__ = [\n 'Rate',\n 'AsymptoticOrderer',\n]\n\n\nfrom incoq.util.type import checktype_seq\n\n\nclass Rate:\n \"\"\"Constants for the greedy join order heuristic. Lower is better.\"\"\"\n # Reserved for delta clause.\n FIRST = -10\n # Prioritize constant-time EnumClause over other constant time\n # clauses to help ensure that demand checks get done before\n # otherwise-type-unsafe operations.\n CONSTANT_MEMBERSHIP = -5\n CONSTANT = 0\n NORMAL = 10\n NOTPREFERRED = 20\n LASTRESORT = 100\n UNRUNNABLE = 1000\n\n\nclass AsymptoticOrderer:\n \n \"\"\"Clause orderer based on the greedy heuristic of minimum-\n asymptotic-cost-first.\n \"\"\"\n \n class State:\n \n \"\"\"State of the search algorithm.\"\"\"\n \n @classmethod\n def get_initial(cls, clauses, init_bounds, overrides):\n return cls(set(init_bounds), [],\n list(clauses), overrides)\n \n def __init__(self, bindenv, chosen, remaining, overrides):\n self.bindenv = bindenv\n \"\"\"Set of bound variables.\"\"\"\n self.chosen = chosen\n \"\"\"List of selected, ordered clauses.\"\"\"\n self.remaining = remaining\n \"\"\"List of remaining clauses, in no particular order.\"\"\"\n self.overrides = overrides\n \"\"\"Override mapping.\"\"\"\n \n def __repr__(self):\n return '({} : {} : {})'.format(\n ', '.join(self.bindenv),\n ', '.join(str(cl) for cl in self.chosen),\n ', '.join(str(cl) for cl in self.remaining))\n \n def __eq__(self, other):\n return (self.bindenv == other.bindenv and\n self.chosen == other.chosen and\n self.remaining == other.remaining,\n self.overrides == other.overrides)\n \n def is_done(self):\n \"\"\"Return True if no more stepping is possible.\"\"\"\n return len(self.remaining) == 0\n \n def get_answer(self):\n \"\"\"Return the order (must be finished).\"\"\"\n assert self.is_done()\n # Add bindenv info to the result.\n result = []\n bindenv = set()\n for i, cl in self.chosen:\n result.append((i, cl, set(bindenv)))\n bindenv.update(cl.enumvars)\n return result\n \n def step_clause(self, item):\n \"\"\"Return the successor state for choosing a specific\n clause.\n \"\"\"\n assert item in self.remaining\n \n i, clause = item\n \n new_bindenv = set(self.bindenv)\n new_bindenv.update(clause.enumvars)\n new_chosen = list(self.chosen)\n new_chosen.append(item)\n new_remaining = list(self.remaining)\n new_remaining.remove(item)\n \n return type(self)(new_bindenv, new_chosen, new_remaining,\n self.overrides)\n \n def step(self, deterministic=False):\n \"\"\"Return a list of successor states.\"\"\"\n assert not self.is_done()\n \n def rate_func(item):\n _i, clause = item\n \n for k, v in self.overrides.items():\n if clause.fits_string(self.bindenv, k):\n return v\n else:\n return clause.rate(self.bindenv)\n \n # Stable sort by lowest cost.\n remaining = list(self.remaining)\n remaining.sort(key=rate_func)\n # Find those tied for best, ordered left-to-right.\n from itertools import groupby\n 
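# groupby over the sorted list yields runs of clauses with equal\n # rate; the first run is exactly the set tied for minimum cost\n # (e.g. rates [0, 0, 10] give a first group of the two rate-0 clauses).\n 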
groups = groupby(remaining, key=rate_func)\n cost, best_clauses = next(groups)\n best_clauses = list(best_clauses)\n \n # Error if the cost indicates it's unrunnable.\n assert cost is not Rate.UNRUNNABLE, \\\n ('Unrunnable clause chosen by join heuristic\\n'\n 'State: ' + str(self))\n \n if deterministic:\n best_clauses = best_clauses[0:1]\n \n return [self.step_clause(cl) for cl in best_clauses]\n \n def __init__(self, overrides=None):\n if overrides == None:\n overrides = {}\n self.overrides = overrides\n \"\"\"Mapping from clauses to priority, to be used in place of\n Clause.rate().\n \"\"\"\n \n def process(self, states, first_only=False):\n \"\"\"Given a list of states, return a list of final states\n contained in or derived from states in this list.\n \"\"\"\n checktype_seq(states, self.State)\n \n results = []\n for state in states:\n if state.is_done():\n results.append(state)\n else:\n next_states = state.step(deterministic=first_only)\n final_states = self.process(next_states)\n results.extend(final_states)\n return results\n \n def get_orders(self, clauses, init_bounds=(), first_only=False):\n \"\"\"Return all orders satisfying the heuristic.\n Non-deterministic choices are made when clauses are tied.\n \"\"\"\n init_state = self.State.get_initial(clauses, init_bounds,\n self.overrides)\n final_states = self.process([init_state], first_only=first_only)\n assert all(state.is_done() for state in final_states)\n results = [state.get_answer() for state in final_states]\n return results\n \n def get_order(self, clauses, init_bounds=()):\n \"\"\"Return a single order.\"\"\"\n return self.get_orders(clauses, init_bounds=init_bounds,\n first_only=True)[0]\n" }, { "alpha_fraction": 0.6471449732780457, "alphanum_fraction": 0.6544656157493591, "avg_line_length": 17.97222137451172, "blob_id": "71b395a17f8f8ce6ab1e2579fef2c2159a202c35", "content_id": "db533a127a1c22029b27ad698169b680e3f46dd8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 683, "license_type": "no_license", "max_line_length": 51, "num_lines": 36, "path": "/incoq/tests/programs/objcomp/autoflatten_in.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "# Only run object domain conversion when necessary.\n# Don't include input relations as parameters.\n\nfrom incoq.runtime import *\n\nOPTIONS(\n obj_domain = True,\n autodetect_input_rels = True,\n)\n\n\nR = Set()\nfor i in range(1, 5):\n R.add(i)\n\nQUERYOPTIONS(\n '{x for x in R}',\n uset_mode = 'none',\n impl = 'inc',\n)\n\nprint(sorted({x for x in R}))\nR.remove(3)\nprint(sorted({x for x in R}))\n\n# Don't M-flatten since the M-set is never used.\n# Test this with an update to a set that can't be\n# classified as a relation due to aliasing.\nS = Set()\n# This should even be eliminated as dead code.\ns = S\ns.add(1)\n\n# Don't F-flatten objects since they're not used.\no = Obj()\no.a = 1\n" }, { "alpha_fraction": 0.5020408034324646, "alphanum_fraction": 0.5428571701049805, "avg_line_length": 14.3125, "blob_id": "ac9ca8251bbb79c1d79c492ae2f3a6cf7fd1e141", "content_id": "1da7d2423ebb5f380bc37df006658d0b0899bb4b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 245, "license_type": "no_license", "max_line_length": 51, "num_lines": 16, "path": "/incoq/tests/programs/aggr/tuple_in.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "# Aggregates over sets of tuples.\n\nfrom incoq.runtime import *\n\nOPTIONS(\n default_impl = 
'inc',\n)\n\nR = Set()\n\nfor (x, y, z) in [(1, 2, 3), (1, 4, 5), (6, 7, 8)]:\n R.add((x, y, z))\n\na = 1\nprint(count(R))\nprint(count(setmatch(R, 'buu', a)))\n" }, { "alpha_fraction": 0.47669512033462524, "alphanum_fraction": 0.4768747091293335, "avg_line_length": 30.72364616394043, "blob_id": "1baf7c08a87cd339e24e5ea7778f14ed9c457acb", "content_id": "3a123f90e1e46a4e599315c3d8bb99bc12e8867a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11135, "license_type": "no_license", "max_line_length": 76, "num_lines": 351, "path": "/incoq/compiler/obj/domaintrans.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Object-domain / pair-domain translation.\"\"\"\n\n\n__all__ = [\n 'to_pairdomain',\n 'to_objdomain',\n]\n\n\nfrom incoq.util.collections import OrderedSet\nimport incoq.compiler.incast as L\n\nfrom .pairrel import (make_mrel, is_mrel, make_frel, is_frel,\n get_frel_field, make_maprel, is_maprel,\n is_specialrel)\nfrom .objcomp import flatten_comp, unflatten_comp\n\n\nclass UpdateToPairTransformer(L.NodeTransformer):\n \n \"\"\"Convert updates to the relational domain.\"\"\"\n \n def __init__(self, use_mset, fields, use_mapset, input_rels):\n super().__init__()\n self.use_mset = use_mset\n self.fields = OrderedSet(fields)\n self.use_mapset = use_mapset\n self.input_rels = input_rels\n \n def visit_Module(self, node):\n node = self.generic_visit(node)\n \n decls = ()\n if self.use_mset:\n decls += L.pc('''\n M = MSet()\n ''', subst={'M': L.sn(make_mrel())})\n for field in self.fields:\n decls += L.pc('''\n F = FSet()\n ''', subst={'F': L.sn(make_frel(field))})\n if self.use_mapset:\n decls += L.pc('''\n MAP = MAPSet()\n ''', subst={'MAP': L.sn(make_maprel())})\n \n node = node._replace(body=decls + node.body)\n return node\n \n def visit_Assign(self, node):\n node = self.generic_visit(node)\n \n if L.is_attrassign(node):\n cont, field, value = L.get_attrassign(node)\n if field not in self.fields:\n return node\n return L.pc('''\n FSET.add((CONT, VALUE))\n ''', subst={'FSET': L.ln(make_frel(field)),\n 'CONT': cont,\n 'VALUE': value})\n \n else:\n return node\n \n def visit_Delete(self, node):\n node = self.generic_visit(node)\n \n if L.is_delattr(node):\n cont, field = L.get_delattr(node)\n if field not in self.fields:\n return node\n return L.pc('''\n FSET.remove((CONT, CONT.FIELD))\n ''', subst={'FSET': L.ln(make_frel(field)),\n 'CONT': cont,\n '@FIELD': field})\n \n else:\n return node\n \n def visit_SetUpdate(self, node):\n node = self.generic_visit(node)\n \n if not self.use_mset:\n return node\n \n # Ignore updates to input relations.\n if (isinstance(node.target, L.Name) and\n node.target.id in self.input_rels):\n return node\n \n code = L.pc('''\n M.OP((CONT, ELEM))\n ''', subst={'M': L.ln(make_mrel()),\n '@OP': node.op,\n 'CONT': node.target,\n 'ELEM': node.elem})\n return code\n \n def visit_AssignKey(self, node):\n node = self.generic_visit(node)\n \n if not self.use_mapset:\n return node\n \n code = L.pc('''\n MAPSET.add((TARGET, KEY, VALUE))\n ''', subst={'MAPSET': L.ln(make_maprel()),\n 'TARGET': node.target,\n 'KEY': node.key,\n 'VALUE': node.value})\n return code\n \n def visit_DelKey(self, node):\n node = self.generic_visit(node)\n \n if not self.use_mapset:\n return node\n \n code = L.pc('''\n MAPSET.remove((TARGET, KEY, TARGET[KEY]))\n ''', subst={'MAPSET': L.ln(make_maprel()),\n 'TARGET': node.target,\n 'KEY': node.key})\n return code\n\n\nclass 
UpdateToObjTransformer(L.NodeTransformer):\n \n \"\"\"Convert updates to the object domain.\"\"\"\n \n def __init__(self, namegen):\n super().__init__()\n self.namegen = namegen\n \n def visit_Assign(self, node):\n # Get rid of pair relation declarations.\n \n node = self.generic_visit(node)\n \n if L.is_varassign(node):\n name, _value = L.get_varassign(node)\n if is_specialrel(name):\n return ()\n \n return node\n \n def visit_SetUpdate(self, node):\n node = self.generic_visit(node)\n \n if not node.is_varupdate():\n return node\n rel = node.target.id\n elem = node.elem\n \n if not is_specialrel(rel):\n return node\n \n code = ()\n \n # Insert decomposition of element if it's not\n # an AST of a tuple of the right arity.\n if is_mrel(rel) or is_frel(rel):\n if isinstance(elem, L.Tuple) and len(elem.elts) == 2:\n cont, item = elem.elts\n else:\n prefix = self.namegen.next_prefix()\n cont = prefix + 'cont'\n item = prefix + 'item'\n code += L.pc('''\n CONT, ITEM = ELEM\n ''', subst={'CONT': cont,\n 'ITEM': item,\n 'ELEM': elem})\n elif is_maprel(rel):\n if isinstance(elem, L.Tuple) and len(elem.elts) == 3:\n map, key, value = elem.elts\n else:\n prefix = self.namegen.next_prefix()\n map = prefix + 'map'\n key = prefix + 'key'\n value = prefix + 'value'\n code = L.pc('''\n MAP, KEY, VALUE = ELEM\n ''', subst={'MAP': map,\n 'KEY': key,\n 'VALUE': value,\n 'ELEM': elem})\n \n if is_mrel(rel):\n code += L.pc('''\n CONT.OP(ITEM)\n ''', subst={'CONT': cont,\n '@OP': node.op,\n 'ITEM': item})\n elif is_frel(rel):\n field = get_frel_field(rel)\n if node.op == 'add':\n code += L.pc('''\n CONT.FIELD = ITEM\n ''', subst={'CONT': cont,\n '@FIELD': field,\n 'ITEM': item})\n elif node.op == 'remove':\n code += L.pc('''\n del CONT.FIELD\n ''', subst={'CONT': cont,\n '@FIELD': field})\n else:\n assert()\n \n elif is_maprel(rel):\n if node.op == 'add':\n code += L.pc('''\n MAP[KEY] = VALUE\n ''', subst={'MAP': map,\n 'KEY': key,\n 'VALUE': value})\n elif node.op == 'remove':\n code += L.pc('''\n del MAP[KEY]\n ''', subst={'MAP': map,\n 'KEY': key})\n else:\n assert()\n \n else:\n assert()\n \n return code\n\n\ndef flatten_all_comps(tree, input_rels):\n \"\"\"Flatten all object comprehensions in the program, and return\n a tuple of the new tree, a boolean for whether the M-set was\n used, an OrderedSet of all fields replaced, and a boolean for\n whether the MAP set was used.\n \"\"\"\n class Flattener(L.QueryMapper):\n def process(self, tree):\n self.use_mset = False\n self.fields = OrderedSet()\n self.use_mapset = False\n tree = super().process(tree)\n return tree, self.use_mset, self.fields, self.use_mapset\n \n def map_Comp(self, node):\n new_comp, new_mset, new_fields, new_mapset = \\\n flatten_comp(node, input_rels)\n self.use_mset |= new_mset\n self.fields.update(new_fields)\n self.use_mapset |= new_mapset\n return new_comp\n \n return Flattener.run(tree)\n\ndef unflatten_all_comps(tree):\n \"\"\"Unflatten all object comprehensions in the program, and return\n the new tree.\n \"\"\"\n class Unflattener(L.QueryMapper):\n def map_Comp(self, node):\n return unflatten_comp(node)\n \n return Unflattener.run(tree)\n\n\ndef is_retrievalchain(node):\n \"\"\"Return whether the given node is a chain of retrievals\n such as a[b.c].d. 
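Attribute accesses recurse on their base\n expression; subscripts additionally require an Index slice whose key is\n itself a chain. 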
Trivially, a single Name is a chain.\n \"\"\"\n if isinstance(node, L.Name):\n return True\n elif isinstance(node, L.Attribute):\n return is_retrievalchain(node.value)\n elif isinstance(node, L.Subscript):\n if not isinstance(node.slice, L.Index):\n return False\n return (is_retrievalchain(node.value) and\n is_retrievalchain(node.slice.value))\n else:\n return False\n\ndef get_retrieval_params(node):\n \"\"\"Return a tuple of parameters in a retrieval chain, e.g.\n ('a', 'b') for a[b.c].d.\n \"\"\"\n assert is_retrievalchain(node)\n \n params = ()\n class Vis(L.NodeVisitor):\n def visit_Name(self, node):\n nonlocal params\n params += (node.id,)\n Vis.run(node)\n \n return params\n\n\nclass AggregatePreprocessor(L.NodeTransformer):\n \n \"\"\"Expand aggregates of variables or retrievals into\n aggregates of object-domain comprehensions.\n \"\"\"\n \n def visit_Aggregate(self, node):\n node = self.generic_visit(node)\n \n operand = node.value\n if isinstance(operand, L.Comp):\n return node\n if not is_retrievalchain(operand):\n # Bailout, looks like we won't be able to incrementalize\n # this later anyway.\n return node\n \n # Replace with {_e for _e in OPERAND}.\n # This case is for both single vars and retrieval chains.\n # The comp's options are inherited from the aggregate.\n params = get_retrieval_params(operand)\n elem = '_e'\n clause = L.Enumerator(target=L.sn(elem),\n iter=operand)\n node = node._replace(value=L.Comp(resexp=L.ln(elem),\n clauses=(clause,),\n params=params,\n options=node.options))\n return node\n\n\ndef to_pairdomain(tree, manager, input_rels):\n \"\"\"Convert a program to the pair domain, rewriting updates and\n queries.\n \"\"\"\n tree = AggregatePreprocessor.run(tree)\n tree, use_mset, fields, use_mapset = flatten_all_comps(tree, input_rels)\n tree = UpdateToPairTransformer.run(tree, use_mset, fields, use_mapset,\n input_rels)\n manager.use_mset = use_mset\n manager.fields = list(fields)\n manager.use_mapset = use_mapset\n return tree\n\ndef to_objdomain(tree, manager):\n \"\"\"Convert a program to the object domain, rewriting updates and\n queries.\n \"\"\"\n tree = UpdateToObjTransformer.run(tree, manager.namegen)\n tree = unflatten_all_comps(tree)\n return tree\n" }, { "alpha_fraction": 0.46904513239860535, "alphanum_fraction": 0.506820559501648, "avg_line_length": 27.02941131591797, "blob_id": "fee5f14d908a78dfcfc81ef9c544c6d7f415cde4", "content_id": "2644aff115a4b2319fd163ee387b7c26b9f10860", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 953, "license_type": "no_license", "max_line_length": 76, "num_lines": 34, "path": "/incoq/tests/programs/comp/implmode_in.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "# Alternative implementation strategies for comprehensions.\n\nfrom incoq.runtime import *\n\nE = Set()\n\nfor v1, v2 in {(1, 2), (2, 3), (3, 4), (4, 5)}:\n E.add((v1, v2))\n\nQUERYOPTIONS(\n '{(x, z) for (x, y) in E for (y2, z) in E if y == y2 if z > 1}',\n impl = 'batch',\n)\nprint(sorted({(x, z) for (x, y) in E for (y2, z) in E if y == y2 if z > 1}))\n\nQUERYOPTIONS(\n '{(x, z) for (x, y) in E for (y2, z) in E if y == y2 if z > 2}',\n impl = 'auxonly',\n)\nprint(sorted({(x, z) for (x, y) in E for (y2, z) in E if y == y2 if z > 2}))\n\nQUERYOPTIONS(\n '{(x, z) for (x, y) in E for (y2, z) in E if y == y2 if z > 3}',\n impl = 'inc',\n maint_impl = 'batch',\n)\nprint(sorted({(x, z) for (x, y) in E for (y2, z) in E if y == y2 if z > 3}))\n\nQUERYOPTIONS(\n '{(x, z) for (x, y) in E 
for (y2, z) in E if y == y2 if z > 4}',\n impl = 'inc',\n maint_impl = 'auxonly',\n)\nprint(sorted({(x, z) for (x, y) in E for (y2, z) in E if y == y2 if z > 4}))\n" }, { "alpha_fraction": 0.5949702262878418, "alphanum_fraction": 0.5982057452201843, "avg_line_length": 24.902856826782227, "blob_id": "cc1045fdf33376f79ec11a8bd09cd1210025c86d", "content_id": "637d555a8982a0365cc0c12d56ac028c5a463a3c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13599, "license_type": "no_license", "max_line_length": 71, "num_lines": 525, "path": "/incoq/compiler/incast/helpers.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Utility functions for specific AST construction and pattern-matching\noperations.\n\"\"\"\n\n\n__all__ = [\n 'ln',\n 'sn',\n 'dn',\n 'tuplify',\n 'cmp',\n 'cmpin',\n 'cmpnotin',\n 'cmpeq',\n 'cmpnoteq',\n 'plainfuncdef',\n 'get_plainfuncdef',\n 'is_plainfuncdef',\n \n 'get_varassign',\n 'is_varassign',\n 'get_vartuple',\n 'is_vartuple',\n 'get_name',\n 'is_name',\n 'get_cmp',\n 'is_cmp',\n 'get_vareqcmp',\n 'is_vareqcmp',\n 'get_singletonset',\n 'is_singletonset',\n 'get_singadd',\n 'is_singadd',\n 'get_singsub',\n 'is_singsub',\n 'get_namematch',\n 'is_namematch',\n 'get_namesmlookup',\n 'is_namesmlookup',\n 'get_attrassign',\n 'is_attrassign',\n 'get_delattr',\n 'is_delattr',\n 'get_mapassign',\n 'is_mapassign',\n 'get_delmap',\n 'is_delmap',\n 'get_importstar',\n 'is_importstar',\n 'get_setunion',\n 'is_setunion',\n 'get_plaincall',\n 'is_plaincall',\n]\n\n\nfrom functools import partial, wraps\nfrom iast import trim\nfrom simplestruct.type import checktype, checktype_seq\n\nfrom .nodes import *\nfrom .structconv import NodeVisitor\n\n\n# Construction helpers.\n\n\ndef ln(id):\n checktype(id, str)\n return Name(id, Load())\n\ndef sn(id):\n checktype(id, str)\n return Name(id, Store())\n\ndef dn(id):\n checktype(id, str)\n return Name(id, Del())\n\n\ndef tuplify(components, lval=False):\n \"\"\"Wrap a sequence of components in a Tuple node.\n \n Each component may be either an AST expression node or an\n identifier that gets preprocessed into a Name node.\n \n The Tuple and Name nodes are created with Load context if lval is\n False and Store context if lval is True.\n \n If there is exactly one component, it is returned rather than\n a singleton Tuple wrapping it.\n \n If there are zero components, then an empty Tuple is returned if\n lval is False and a wildcard Name is returned if lval is True.\n (This is because an empty Tuple is syntactically invalid in Store\n context, e.g. 
\"for () in S\" is not allowed.)\n \"\"\"\n ctxcls = Store if lval else Load\n \n orig, components = components, []\n for comp in orig:\n if isinstance(comp, str):\n components.append(Name(comp, ctxcls()))\n else:\n components.append(comp)\n components = tuple(components)\n checktype_seq(components, AST)\n \n if len(components) == 0 and lval:\n return Name('_', Store())\n elif len(components) == 1:\n return components[0]\n else:\n return Tuple(components, ctxcls())\n\n\ndef cmp(left, op, right):\n \"\"\"Comparison with only two operands.\"\"\"\n checktype(left, AST)\n checktype(op, AST)\n checktype(right, AST)\n \n return Compare(left, (op,), (right,))\n\ndef cmp_(left, right, op_kind):\n \"\"\"Helper for cmp*.\"\"\"\n return cmp(left, op_kind(), right)\n\ncmpin = partial(cmp_, op_kind=In)\ncmpnotin = partial(cmp_, op_kind=NotIn)\ncmpeq = partial(cmp_, op_kind=Eq)\ncmpnoteq = partial(cmp_, op_kind=NotEq)\n\ndef plainfuncdef(name, argnames, body):\n \"\"\"FunctionDef with no fancy arguments. Wrapped in a tuple\n (i.e., code, not stmt).\n \"\"\"\n processed_args = tuple(arg(a, None) for a in argnames)\n processed_arguments = arguments(processed_args, None, (),\n (), None, ())\n funcdef = FunctionDef(name, processed_arguments, body, (), None)\n return (funcdef,)\n\ndef get_plainfuncdef(func):\n \"\"\"Return the name, tuple of arg names, and body of a plain\n function definition.\n \"\"\"\n if not is_plainfuncdef(func):\n raise TypeError\n \n args = tuple(a.arg for a in func.args.args)\n return (func.name, args, func.body)\n\ndef is_plainfuncdef(func):\n \"\"\"Returns True if the function has no fancy arguments.\n (Note that func is a FunctionDef statement, not code.\n \"\"\"\n checktype(func, FunctionDef)\n plain_args = tuple(arg(a.arg, None) for a in func.args.args)\n plain_arguments = arguments(plain_args, None, (),\n (), None, ())\n return func.args == plain_arguments\n\n\n# Pattern helpers.\n\n\ndef isify(func):\n \"\"\"Autogenerate the \"is_\" functions.\"\"\"\n @wraps(func)\n def f(node):\n try:\n func(node)\n return True\n except TypeError:\n return False\n \n f.__doc__ = trim(\n \"\"\"\n Returns True if node fits the form matched by {},\n False otherwise.\n \"\"\".format(func.__name__))\n \n return f\n\ndef get_varassign(node):\n \"\"\"Match an Assign node of form\n \n <Name> = <value>\n \n and return Name.id and value.\n \"\"\"\n checktype(node, AST)\n \n if (isinstance(node, Assign) and\n len(node.targets) == 1 and\n isinstance(node.targets[0], Name)):\n return node.targets[0].id, node.value\n \n from . import ts\n raise TypeError('get_varassign failed: ' + ts(node))\n\nis_varassign = isify(get_varassign)\n\ndef get_vartuple(node):\n \"\"\"Match a Name or Tuple of Names and return a tuple of the\n identifiers.\n \"\"\"\n checktype(node, AST)\n \n if isinstance(node, Name):\n return (node.id,)\n elif (isinstance(node, Tuple) and\n all(isinstance(item, Name) for item in node.elts)):\n return tuple(item.id for item in node.elts)\n \n from . import ts\n raise TypeError('get_vartuple failed: ' + ts(node))\n\nis_vartuple = isify(get_vartuple)\n\ndef get_name(node):\n \"\"\"Match a Name node and return the identifier.\"\"\"\n checktype(node, AST)\n \n if isinstance(node, Name):\n return node.id\n \n from . 
import ts\n raise TypeError('get_name failed: ' + ts(node))\n\nis_name = isify(get_name)\n\ndef get_cmp(node):\n \"\"\"Match a Compare node of two operands, and return a triple\n of the first operand, the operation, and the second operand.\n \"\"\"\n checktype(node, AST)\n \n if (isinstance(node, Compare) and\n len(node.ops) == len(node.comparators) == 1):\n return node.left, node.ops[0], node.comparators[0]\n \n from . import ts\n raise TypeError('get_cmp failed: ' + ts(node))\n\nis_cmp = isify(get_cmp)\n\ndef get_vareqcmp(node):\n \"\"\"Match a Compare node of form\n \n <Name 1> == <Name 2> == ... == <Name n>\n \n and return a tuple of the identifiers.\n \"\"\"\n checktype(node, AST)\n \n if (isinstance(node, Compare) and\n all(isinstance(op, Eq) for op in node.ops) and\n isinstance(node.left, Name) and\n all(isinstance(c, Name) for c in node.comparators)):\n return (node.left.id,) + tuple(c.id for c in node.comparators)\n \n from . import ts\n raise TypeError('get_vareqcmp failed: ' + ts(node))\n\nis_vareqcmp = isify(get_vareqcmp)\n\ndef get_singletonset(node):\n \"\"\"Match a singleton set, i.e.\n \n {val}\n \n and return val.\n \"\"\"\n checktype(node, AST)\n \n if isinstance(node, Set) and len(node.elts) == 1:\n return node.elts[0]\n \n from . import ts\n raise TypeError('get_singletonset failed: ' + ts(node))\n\nis_singletonset = isify(get_singletonset)\n\ndef get_singadd(node):\n \"\"\"Match a singleton set added to an expression, i.e.\n \n <expr1> + {<expr2>}\n \n and return the two expressions.\n \"\"\"\n checktype(node, AST)\n \n if (isinstance(node, BinOp) and\n isinstance(node.op, Add) and\n isinstance(node.right, Set) and\n len(node.right.elts) == 1):\n return node.left, node.right.elts[0]\n \n from . import ts\n raise TypeError('get_singsub failed: ' + ts(node))\n\nis_singadd = isify(get_singadd)\n\ndef get_singsub(node):\n \"\"\"Match a singleton set subtracted from an expression, i.e.\n \n <expr1> - {<expr2>}\n \n and return the two expressions.\n \"\"\"\n checktype(node, AST)\n \n if (isinstance(node, BinOp) and\n isinstance(node.op, Sub) and\n isinstance(node.right, Set) and\n len(node.right.elts) == 1):\n return node.left, node.right.elts[0]\n \n from . import ts\n raise TypeError('get_singsub failed: ' + ts(node))\n\nis_singsub = isify(get_singsub)\n\ndef get_namematch(node):\n \"\"\"Match a SetMatch over a Name, and return a triple of the name,\n mask, and key.\n \"\"\"\n checktype(node, SetMatch)\n \n if isinstance(node.target, Name):\n return (node.target.id, node.mask, node.key)\n \n from . import ts\n raise TypeError('get_namematch failed: ' + ts(node))\n\nis_namematch = isify(get_namematch)\n\ndef get_namesmlookup(node):\n \"\"\"Match a SMLookup node over a name, and return a triple of the\n name, mask, and key.\n \"\"\"\n checktype(node, SMLookup)\n \n if isinstance(node.target, Name):\n return (node.target.id, node.mask, node.key)\n \n from . import ts\n raise TypeError('get_namesmlookup failed: ' + ts(node))\n\nis_namesmlookup = isify(get_namesmlookup)\n\ndef get_attrassign(node):\n \"\"\"Match an Assign node of form\n \n <alpha>.<attr> = <value>\n \n and returns alpha, attr, and value.\n \"\"\"\n checktype(node, AST)\n \n if (isinstance(node, Assign) and\n len(node.targets) == 1 and\n isinstance(node.targets[0], Attribute)):\n return node.targets[0].value, node.targets[0].attr, node.value\n \n from . 
import ts\n raise TypeError('get_attrassign failed: ' + ts(node))\n\nis_attrassign = isify(get_attrassign)\n\ndef get_delattr(node):\n \"\"\"Match a Delete node of form\n \n del <alpha>.<attr>\n \n and return alpha and attr.\n \"\"\"\n checktype(node, AST)\n \n if (isinstance(node, Delete) and\n len(node.targets) == 1 and\n isinstance(node.targets[0], Attribute)):\n return node.targets[0].value, node.targets[0].attr\n \n from . import ts\n raise TypeError('get_delattr failed: ' + ts(node))\n\nis_delattr = isify(get_delattr)\n\ndef get_mapassign(node):\n \"\"\"Match an Assign node of form\n \n <alpha>[<beta>] = <value>\n \n and returns alpha, beta, and value. As a special case, if alpha\n is the function call \"globals()\", do not match.\n \"\"\"\n checktype(node, AST)\n \n if (isinstance(node, Assign) and\n len(node.targets) == 1 and\n isinstance(node.targets[0], Subscript) and\n isinstance(node.targets[0].slice, Index)):\n # Catch globals().\n target = node.targets[0].value\n if (isinstance(target, Call) and\n isinstance(target.func, Name) and\n target.func.id == 'globals'):\n pass\n else:\n return (node.targets[0].value, node.targets[0].slice.value,\n node.value)\n \n from . import ts\n raise TypeError('get_mapassign failed: ' + ts(node))\n\nis_mapassign = isify(get_mapassign)\n\ndef get_delmap(node):\n \"\"\"Match a Delete node of form\n \n del <alpha>[<beta>]\n \n and return alpha and beta. As a special case, if alphas is\n the function call \"globals()\", do not match.\n \"\"\"\n checktype(node, AST)\n \n if (isinstance(node, Delete) and\n len(node.targets) == 1 and\n isinstance(node.targets[0], Subscript) and\n isinstance(node.targets[0].slice, Index)):\n target = node.targets[0].value\n if (isinstance(target, Call) and\n isinstance(target.func, Name) and\n target.func.id == 'globals'):\n pass\n else:\n return node.targets[0].value, node.targets[0].slice.value\n \n from . import ts\n raise TypeError('get_delmap failed: ' + ts(node))\n\nis_delmap = isify(get_delmap)\n\ndef get_importstar(node):\n \"\"\"Match an import statement of form\n \n from <mod> import *\n \n and return mod.\n \"\"\"\n checktype(node, AST)\n \n if (isinstance(node, ImportFrom) and\n len(node.names) == 1 and\n node.names[0].name == '*' and\n node.level == 0):\n return node.module\n \n from . import ts\n raise TypeError('get_importstar failed: ' + ts(node))\n\nis_importstar = isify(get_importstar)\n\ndef get_setunion(node):\n \"\"\"Match a union of set literals, set comprehensions, and names,\n and return a tuple of the individual set expression ASTs.\n \"\"\"\n checktype(node, AST)\n \n class Flattener(NodeVisitor):\n \n # BinOp nodes are tree-structured. Flatten the tree.\n \n class Failure(BaseException):\n pass\n \n def process(self, tree):\n self.parts = []\n super().process(tree)\n return tuple(self.parts)\n \n def visit_BinOp(self, node):\n if not isinstance(node.op, BitOr):\n raise self.Failure\n self.visit(node.left)\n self.visit(node.right)\n \n def generic_visit(self, node):\n # Don't recurse. Just add this node as an operand.\n self.parts.append(node)\n \n try:\n parts = Flattener.run(node)\n if all(isinstance(p, (Set, Comp, Name))\n for p in parts):\n return parts\n except Flattener.Failure:\n pass\n \n from . 
import ts\n raise TypeError('get_setunion failed: ' + ts(node))\n\nis_setunion = isify(get_setunion)\n\ndef get_plaincall(node):\n \"\"\"Match a call of a Name node with only positional arguments.\n Return the function name and a tuple of the argument expressions.\n \"\"\"\n checktype(node, AST)\n \n if (isinstance(node, Call) and\n isinstance(node.func, Name) and\n node.keywords == () and\n node.starargs is None and\n node.kwargs is None):\n return node.func.id, node.args\n \n from . import ts\n raise TypeError('get_importstar failed: ' + ts(node))\n\nis_plaincall = isify(get_plaincall)\n" }, { "alpha_fraction": 0.4325903058052063, "alphanum_fraction": 0.5064966082572937, "avg_line_length": 42.13367462158203, "blob_id": "881e7a51e88877c68759420cf1353000733fdb96", "content_id": "c79e103382f6921a29ea7f3a3302689d4ba27303", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16778, "license_type": "no_license", "max_line_length": 143, "num_lines": 389, "path": "/incoq/tests/programs/deminc/nested_subdem_out.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from incoq.runtime import *\n# Comp1 := {(a, a, c) : a in _U_Comp1, (a, b) in E, (b, c) in E}\n# Comp8 := {(a, (x, z)) : a in _U_Comp8, (x, y) in E, (a, y, z) in Comp1}\n# Comp1_delta := {a : a in _U_Comp8, (x, y) in E}\n_m_Comp8_out = Map()\ndef _maint__m_Comp8_out_add(_e):\n (v27_1, v27_2) = _e\n if (v27_1 not in _m_Comp8_out):\n _m_Comp8_out[v27_1] = set()\n _m_Comp8_out[v27_1].add(v27_2)\n\ndef _maint__m_Comp8_out_remove(_e):\n (v28_1, v28_2) = _e\n _m_Comp8_out[v28_1].remove(v28_2)\n if (len(_m_Comp8_out[v28_1]) == 0):\n del _m_Comp8_out[v28_1]\n\n_m_E_out = Map()\ndef _maint__m_E_out_add(_e):\n (v25_1, v25_2) = _e\n if (v25_1 not in _m_E_out):\n _m_E_out[v25_1] = set()\n _m_E_out[v25_1].add(v25_2)\n\ndef _maint__m_E_out_remove(_e):\n (v26_1, v26_2) = _e\n _m_E_out[v26_1].remove(v26_2)\n if (len(_m_E_out[v26_1]) == 0):\n del _m_E_out[v26_1]\n\n_m_Comp1_ubu = Map()\ndef _maint__m_Comp1_ubu_add(_e):\n (v23_1, v23_2, v23_3) = _e\n if (v23_2 not in _m_Comp1_ubu):\n _m_Comp1_ubu[v23_2] = set()\n _m_Comp1_ubu[v23_2].add((v23_1, v23_3))\n\ndef _maint__m_Comp1_ubu_remove(_e):\n (v24_1, v24_2, v24_3) = _e\n _m_Comp1_ubu[v24_2].remove((v24_1, v24_3))\n if (len(_m_Comp1_ubu[v24_2]) == 0):\n del _m_Comp1_ubu[v24_2]\n\n_m_E_in = Map()\ndef _maint__m_E_in_add(_e):\n (v21_1, v21_2) = _e\n if (v21_2 not in _m_E_in):\n _m_E_in[v21_2] = set()\n _m_E_in[v21_2].add(v21_1)\n\ndef _maint__m_E_in_remove(_e):\n (v22_1, v22_2) = _e\n _m_E_in[v22_2].remove(v22_1)\n if (len(_m_E_in[v22_2]) == 0):\n del _m_E_in[v22_2]\n\n_m_Comp1_buu = Map()\ndef _maint__m_Comp1_buu_add(_e):\n (v19_1, v19_2, v19_3) = _e\n if (v19_1 not in _m_Comp1_buu):\n _m_Comp1_buu[v19_1] = set()\n _m_Comp1_buu[v19_1].add((v19_2, v19_3))\n\ndef _maint__m_Comp1_buu_remove(_e):\n (v20_1, v20_2, v20_3) = _e\n _m_Comp1_buu[v20_1].remove((v20_2, v20_3))\n if (len(_m_Comp1_buu[v20_1]) == 0):\n del _m_Comp1_buu[v20_1]\n\nComp1_delta = RCSet()\ndef _maint_Comp1_delta__U_Comp8_add(_e):\n # Iterate {(v11_a, v11_x, v11_y) : v11_a in deltamatch(_U_Comp8, 'b', _e, 1), (v11_x, v11_y) in E}\n v11_a = _e\n for (v11_x, v11_y) in E:\n if (v11_a not in Comp1_delta):\n Comp1_delta.add(v11_a)\n else:\n Comp1_delta.incref(v11_a)\n\ndef _maint_Comp1_delta_E_add(_e):\n # Iterate {(v13_a, v13_x, v13_y) : v13_a in _U_Comp8, (v13_x, v13_y) in deltamatch(E, 'bb', _e, 1)}\n (v13_x, v13_y) = _e\n for v13_a in _U_Comp8:\n if (v13_a not in 
Comp1_delta):\n Comp1_delta.add(v13_a)\n else:\n Comp1_delta.incref(v13_a)\n\nComp8 = RCSet()\ndef _maint_Comp8__U_Comp8_add(_e):\n # Iterate {(v5_a, v5_x, v5_y, v5_z) : v5_a in deltamatch(_U_Comp8, 'b', _e, 1), (v5_x, v5_y) in E, (v5_a, v5_y, v5_z) in Comp1}\n v5_a = _e\n for (v5_y, v5_z) in (_m_Comp1_buu[v5_a] if (v5_a in _m_Comp1_buu) else set()):\n for v5_x in (_m_E_in[v5_y] if (v5_y in _m_E_in) else set()):\n if ((v5_a, (v5_x, v5_z)) not in Comp8):\n Comp8.add((v5_a, (v5_x, v5_z)))\n # Begin maint _m_Comp8_out after \"Comp8.add((v5_a, (v5_x, v5_z)))\"\n _maint__m_Comp8_out_add((v5_a, (v5_x, v5_z)))\n # End maint _m_Comp8_out after \"Comp8.add((v5_a, (v5_x, v5_z)))\"\n else:\n Comp8.incref((v5_a, (v5_x, v5_z)))\n\ndef _maint_Comp8__U_Comp8_remove(_e):\n # Iterate {(v6_a, v6_x, v6_y, v6_z) : v6_a in deltamatch(_U_Comp8, 'b', _e, 1), (v6_x, v6_y) in E, (v6_a, v6_y, v6_z) in Comp1}\n v6_a = _e\n for (v6_y, v6_z) in (_m_Comp1_buu[v6_a] if (v6_a in _m_Comp1_buu) else set()):\n for v6_x in (_m_E_in[v6_y] if (v6_y in _m_E_in) else set()):\n if (Comp8.getref((v6_a, (v6_x, v6_z))) == 1):\n # Begin maint _m_Comp8_out before \"Comp8.remove((v6_a, (v6_x, v6_z)))\"\n _maint__m_Comp8_out_remove((v6_a, (v6_x, v6_z)))\n # End maint _m_Comp8_out before \"Comp8.remove((v6_a, (v6_x, v6_z)))\"\n Comp8.remove((v6_a, (v6_x, v6_z)))\n else:\n Comp8.decref((v6_a, (v6_x, v6_z)))\n\ndef _maint_Comp8_E_add(_e):\n # Iterate {(v7_a, v7_x, v7_y, v7_z) : v7_a in _U_Comp8, (v7_x, v7_y) in deltamatch(E, 'bb', _e, 1), (v7_a, v7_y, v7_z) in Comp1}\n (v7_x, v7_y) = _e\n for (v7_a, v7_z) in (_m_Comp1_ubu[v7_y] if (v7_y in _m_Comp1_ubu) else set()):\n if (v7_a in _U_Comp8):\n if ((v7_a, (v7_x, v7_z)) not in Comp8):\n Comp8.add((v7_a, (v7_x, v7_z)))\n # Begin maint _m_Comp8_out after \"Comp8.add((v7_a, (v7_x, v7_z)))\"\n _maint__m_Comp8_out_add((v7_a, (v7_x, v7_z)))\n # End maint _m_Comp8_out after \"Comp8.add((v7_a, (v7_x, v7_z)))\"\n else:\n Comp8.incref((v7_a, (v7_x, v7_z)))\n\ndef _maint_Comp8_E_remove(_e):\n # Iterate {(v8_a, v8_x, v8_y, v8_z) : v8_a in _U_Comp8, (v8_x, v8_y) in deltamatch(E, 'bb', _e, 1), (v8_a, v8_y, v8_z) in Comp1}\n (v8_x, v8_y) = _e\n for (v8_a, v8_z) in (_m_Comp1_ubu[v8_y] if (v8_y in _m_Comp1_ubu) else set()):\n if (v8_a in _U_Comp8):\n if (Comp8.getref((v8_a, (v8_x, v8_z))) == 1):\n # Begin maint _m_Comp8_out before \"Comp8.remove((v8_a, (v8_x, v8_z)))\"\n _maint__m_Comp8_out_remove((v8_a, (v8_x, v8_z)))\n # End maint _m_Comp8_out before \"Comp8.remove((v8_a, (v8_x, v8_z)))\"\n Comp8.remove((v8_a, (v8_x, v8_z)))\n else:\n Comp8.decref((v8_a, (v8_x, v8_z)))\n\ndef _maint_Comp8_Comp1_add(_e):\n # Iterate {(v9_a, v9_x, v9_y, v9_z) : v9_a in _U_Comp8, (v9_x, v9_y) in E, (v9_a, v9_y, v9_z) in deltamatch(Comp1, 'bbb', _e, 1)}\n (v9_a, v9_y, v9_z) = _e\n if (v9_a in _U_Comp8):\n for v9_x in (_m_E_in[v9_y] if (v9_y in _m_E_in) else set()):\n if ((v9_a, (v9_x, v9_z)) not in Comp8):\n Comp8.add((v9_a, (v9_x, v9_z)))\n # Begin maint _m_Comp8_out after \"Comp8.add((v9_a, (v9_x, v9_z)))\"\n _maint__m_Comp8_out_add((v9_a, (v9_x, v9_z)))\n # End maint _m_Comp8_out after \"Comp8.add((v9_a, (v9_x, v9_z)))\"\n else:\n Comp8.incref((v9_a, (v9_x, v9_z)))\n\ndef _maint_Comp8_Comp1_remove(_e):\n # Iterate {(v10_a, v10_x, v10_y, v10_z) : v10_a in _U_Comp8, (v10_x, v10_y) in E, (v10_a, v10_y, v10_z) in deltamatch(Comp1, 'bbb', _e, 1)}\n (v10_a, v10_y, v10_z) = _e\n if (v10_a in _U_Comp8):\n for v10_x in (_m_E_in[v10_y] if (v10_y in _m_E_in) else set()):\n if (Comp8.getref((v10_a, (v10_x, v10_z))) == 1):\n # Begin 
maint _m_Comp8_out before \"Comp8.remove((v10_a, (v10_x, v10_z)))\"\n _maint__m_Comp8_out_remove((v10_a, (v10_x, v10_z)))\n # End maint _m_Comp8_out before \"Comp8.remove((v10_a, (v10_x, v10_z)))\"\n Comp8.remove((v10_a, (v10_x, v10_z)))\n else:\n Comp8.decref((v10_a, (v10_x, v10_z)))\n\n_U_Comp8 = RCSet()\n_UEXT_Comp8 = Set()\ndef demand_Comp8(a):\n '{(a, (x, z)) : a in _U_Comp8, (x, y) in E, (a, y, z) in Comp1}'\n if (a not in _U_Comp8):\n _U_Comp8.add(a)\n # Begin maint Comp1_delta after \"_U_Comp8.add(a)\"\n _maint_Comp1_delta__U_Comp8_add(a)\n # End maint Comp1_delta after \"_U_Comp8.add(a)\"\n # Begin maint Comp8 after \"_U_Comp8.add(a)\"\n _maint_Comp8__U_Comp8_add(a)\n # End maint Comp8 after \"_U_Comp8.add(a)\"\n # Begin maint demand_Comp1 after \"_U_Comp8.add(a)\"\n for v15_a in Comp1_delta.elements():\n demand_Comp1(v15_a)\n Comp1_delta.clear()\n # End maint demand_Comp1 after \"_U_Comp8.add(a)\"\n else:\n _U_Comp8.incref(a)\n\ndef undemand_Comp8(a):\n '{(a, (x, z)) : a in _U_Comp8, (x, y) in E, (a, y, z) in Comp1}'\n if (_U_Comp8.getref(a) == 1):\n # Begin maint Comp8 before \"_U_Comp8.remove(a)\"\n _maint_Comp8__U_Comp8_remove(a)\n # End maint Comp8 before \"_U_Comp8.remove(a)\"\n # Begin maint Comp1_delta before \"_U_Comp8.remove(a)\"\n _maint_Comp1_delta__U_Comp8_add(a)\n # End maint Comp1_delta before \"_U_Comp8.remove(a)\"\n _U_Comp8.remove(a)\n # Begin maint undemand_Comp1 after \"_U_Comp8.remove(a)\"\n for v16_a in Comp1_delta.elements():\n undemand_Comp1(v16_a)\n Comp1_delta.clear()\n # End maint undemand_Comp1 after \"_U_Comp8.remove(a)\"\n else:\n _U_Comp8.decref(a)\n\ndef query_Comp8(a):\n '{(a, (x, z)) : a in _U_Comp8, (x, y) in E, (a, y, z) in Comp1}'\n if (a not in _UEXT_Comp8):\n _UEXT_Comp8.add(a)\n demand_Comp8(a)\n return True\n\nComp1 = RCSet()\ndef _maint_Comp1__U_Comp1_add(_e):\n # Iterate {(v1_a, v1_b, v1_c) : v1_a in deltamatch(_U_Comp1, 'b', _e, 1), (v1_a, v1_b) in E, (v1_b, v1_c) in E}\n v1_a = _e\n for v1_b in (_m_E_out[v1_a] if (v1_a in _m_E_out) else set()):\n for v1_c in (_m_E_out[v1_b] if (v1_b in _m_E_out) else set()):\n if ((v1_a, v1_a, v1_c) not in Comp1):\n Comp1.add((v1_a, v1_a, v1_c))\n # Begin maint _m_Comp1_ubu after \"Comp1.add((v1_a, v1_a, v1_c))\"\n _maint__m_Comp1_ubu_add((v1_a, v1_a, v1_c))\n # End maint _m_Comp1_ubu after \"Comp1.add((v1_a, v1_a, v1_c))\"\n # Begin maint _m_Comp1_buu after \"Comp1.add((v1_a, v1_a, v1_c))\"\n _maint__m_Comp1_buu_add((v1_a, v1_a, v1_c))\n # End maint _m_Comp1_buu after \"Comp1.add((v1_a, v1_a, v1_c))\"\n # Begin maint Comp8 after \"Comp1.add((v1_a, v1_a, v1_c))\"\n _maint_Comp8_Comp1_add((v1_a, v1_a, v1_c))\n # End maint Comp8 after \"Comp1.add((v1_a, v1_a, v1_c))\"\n else:\n Comp1.incref((v1_a, v1_a, v1_c))\n\ndef _maint_Comp1__U_Comp1_remove(_e):\n # Iterate {(v2_a, v2_b, v2_c) : v2_a in deltamatch(_U_Comp1, 'b', _e, 1), (v2_a, v2_b) in E, (v2_b, v2_c) in E}\n v2_a = _e\n for v2_b in (_m_E_out[v2_a] if (v2_a in _m_E_out) else set()):\n for v2_c in (_m_E_out[v2_b] if (v2_b in _m_E_out) else set()):\n if (Comp1.getref((v2_a, v2_a, v2_c)) == 1):\n # Begin maint Comp8 before \"Comp1.remove((v2_a, v2_a, v2_c))\"\n _maint_Comp8_Comp1_remove((v2_a, v2_a, v2_c))\n # End maint Comp8 before \"Comp1.remove((v2_a, v2_a, v2_c))\"\n # Begin maint _m_Comp1_buu before \"Comp1.remove((v2_a, v2_a, v2_c))\"\n _maint__m_Comp1_buu_remove((v2_a, v2_a, v2_c))\n # End maint _m_Comp1_buu before \"Comp1.remove((v2_a, v2_a, v2_c))\"\n # Begin maint _m_Comp1_ubu before \"Comp1.remove((v2_a, v2_a, v2_c))\"\n 
_maint__m_Comp1_ubu_remove((v2_a, v2_a, v2_c))\n # End maint _m_Comp1_ubu before \"Comp1.remove((v2_a, v2_a, v2_c))\"\n Comp1.remove((v2_a, v2_a, v2_c))\n else:\n Comp1.decref((v2_a, v2_a, v2_c))\n\ndef _maint_Comp1_E_add(_e):\n v3_DAS = set()\n # Iterate {(v3_a, v3_b, v3_c) : v3_a in _U_Comp1, (v3_a, v3_b) in deltamatch(E, 'bb', _e, 1), (v3_b, v3_c) in E}\n (v3_a, v3_b) = _e\n if (v3_a in _U_Comp1):\n for v3_c in (_m_E_out[v3_b] if (v3_b in _m_E_out) else set()):\n if ((v3_a, v3_b, v3_c) not in v3_DAS):\n v3_DAS.add((v3_a, v3_b, v3_c))\n # Iterate {(v3_a, v3_b, v3_c) : v3_a in _U_Comp1, (v3_a, v3_b) in E, (v3_b, v3_c) in deltamatch(E, 'bb', _e, 1)}\n (v3_b, v3_c) = _e\n for v3_a in (_m_E_in[v3_b] if (v3_b in _m_E_in) else set()):\n if (v3_a in _U_Comp1):\n if ((v3_a, v3_b, v3_c) not in v3_DAS):\n v3_DAS.add((v3_a, v3_b, v3_c))\n for (v3_a, v3_b, v3_c) in v3_DAS:\n if ((v3_a, v3_a, v3_c) not in Comp1):\n Comp1.add((v3_a, v3_a, v3_c))\n # Begin maint _m_Comp1_ubu after \"Comp1.add((v3_a, v3_a, v3_c))\"\n _maint__m_Comp1_ubu_add((v3_a, v3_a, v3_c))\n # End maint _m_Comp1_ubu after \"Comp1.add((v3_a, v3_a, v3_c))\"\n # Begin maint _m_Comp1_buu after \"Comp1.add((v3_a, v3_a, v3_c))\"\n _maint__m_Comp1_buu_add((v3_a, v3_a, v3_c))\n # End maint _m_Comp1_buu after \"Comp1.add((v3_a, v3_a, v3_c))\"\n # Begin maint Comp8 after \"Comp1.add((v3_a, v3_a, v3_c))\"\n _maint_Comp8_Comp1_add((v3_a, v3_a, v3_c))\n # End maint Comp8 after \"Comp1.add((v3_a, v3_a, v3_c))\"\n else:\n Comp1.incref((v3_a, v3_a, v3_c))\n del v3_DAS\n\ndef _maint_Comp1_E_remove(_e):\n v4_DAS = set()\n # Iterate {(v4_a, v4_b, v4_c) : v4_a in _U_Comp1, (v4_a, v4_b) in deltamatch(E, 'bb', _e, 1), (v4_b, v4_c) in E}\n (v4_a, v4_b) = _e\n if (v4_a in _U_Comp1):\n for v4_c in (_m_E_out[v4_b] if (v4_b in _m_E_out) else set()):\n if ((v4_a, v4_b, v4_c) not in v4_DAS):\n v4_DAS.add((v4_a, v4_b, v4_c))\n # Iterate {(v4_a, v4_b, v4_c) : v4_a in _U_Comp1, (v4_a, v4_b) in E, (v4_b, v4_c) in deltamatch(E, 'bb', _e, 1)}\n (v4_b, v4_c) = _e\n for v4_a in (_m_E_in[v4_b] if (v4_b in _m_E_in) else set()):\n if (v4_a in _U_Comp1):\n if ((v4_a, v4_b, v4_c) not in v4_DAS):\n v4_DAS.add((v4_a, v4_b, v4_c))\n for (v4_a, v4_b, v4_c) in v4_DAS:\n if (Comp1.getref((v4_a, v4_a, v4_c)) == 1):\n # Begin maint Comp8 before \"Comp1.remove((v4_a, v4_a, v4_c))\"\n _maint_Comp8_Comp1_remove((v4_a, v4_a, v4_c))\n # End maint Comp8 before \"Comp1.remove((v4_a, v4_a, v4_c))\"\n # Begin maint _m_Comp1_buu before \"Comp1.remove((v4_a, v4_a, v4_c))\"\n _maint__m_Comp1_buu_remove((v4_a, v4_a, v4_c))\n # End maint _m_Comp1_buu before \"Comp1.remove((v4_a, v4_a, v4_c))\"\n # Begin maint _m_Comp1_ubu before \"Comp1.remove((v4_a, v4_a, v4_c))\"\n _maint__m_Comp1_ubu_remove((v4_a, v4_a, v4_c))\n # End maint _m_Comp1_ubu before \"Comp1.remove((v4_a, v4_a, v4_c))\"\n Comp1.remove((v4_a, v4_a, v4_c))\n else:\n Comp1.decref((v4_a, v4_a, v4_c))\n del v4_DAS\n\n_U_Comp1 = RCSet()\n_UEXT_Comp1 = Set()\ndef demand_Comp1(a):\n '{(a, a, c) : a in _U_Comp1, (a, b) in E, (b, c) in E}'\n if (a not in _U_Comp1):\n _U_Comp1.add(a)\n # Begin maint Comp1 after \"_U_Comp1.add(a)\"\n _maint_Comp1__U_Comp1_add(a)\n # End maint Comp1 after \"_U_Comp1.add(a)\"\n else:\n _U_Comp1.incref(a)\n\ndef undemand_Comp1(a):\n '{(a, a, c) : a in _U_Comp1, (a, b) in E, (b, c) in E}'\n if (_U_Comp1.getref(a) == 1):\n # Begin maint Comp1 before \"_U_Comp1.remove(a)\"\n _maint_Comp1__U_Comp1_remove(a)\n # End maint Comp1 before \"_U_Comp1.remove(a)\"\n _U_Comp1.remove(a)\n else:\n _U_Comp1.decref(a)\n\ndef 
query_Comp1(a):\n '{(a, a, c) : a in _U_Comp1, (a, b) in E, (b, c) in E}'\n if (a not in _UEXT_Comp1):\n _UEXT_Comp1.add(a)\n demand_Comp1(a)\n return True\n\nE = Set()\nfor (v1, v2) in [(1, 2), (2, 3), (3, 4), (4, 5)]:\n E.add((v1, v2))\n # Begin maint _m_E_out after \"E.add((v1, v2))\"\n _maint__m_E_out_add((v1, v2))\n # End maint _m_E_out after \"E.add((v1, v2))\"\n # Begin maint _m_E_in after \"E.add((v1, v2))\"\n _maint__m_E_in_add((v1, v2))\n # End maint _m_E_in after \"E.add((v1, v2))\"\n # Begin maint Comp1_delta after \"E.add((v1, v2))\"\n _maint_Comp1_delta_E_add((v1, v2))\n # End maint Comp1_delta after \"E.add((v1, v2))\"\n # Begin maint Comp8 after \"E.add((v1, v2))\"\n _maint_Comp8_E_add((v1, v2))\n # End maint Comp8 after \"E.add((v1, v2))\"\n # Begin maint Comp1 after \"E.add((v1, v2))\"\n _maint_Comp1_E_add((v1, v2))\n # End maint Comp1 after \"E.add((v1, v2))\"\n # Begin maint demand_Comp1 after \"E.add((v1, v2))\"\n for v17_a in Comp1_delta.elements():\n demand_Comp1(v17_a)\n Comp1_delta.clear()\n # End maint demand_Comp1 after \"E.add((v1, v2))\"\ndef query(a):\n print(sorted((query_Comp8(a) and (_m_Comp8_out[a] if (a in _m_Comp8_out) else set()))))\n\nquery(2)\n# Begin maint Comp1 before \"E.remove((1, 2))\"\n_maint_Comp1_E_remove((1, 2))\n# End maint Comp1 before \"E.remove((1, 2))\"\n# Begin maint Comp8 before \"E.remove((1, 2))\"\n_maint_Comp8_E_remove((1, 2))\n# End maint Comp8 before \"E.remove((1, 2))\"\n# Begin maint Comp1_delta before \"E.remove((1, 2))\"\n_maint_Comp1_delta_E_add((1, 2))\n# End maint Comp1_delta before \"E.remove((1, 2))\"\n# Begin maint _m_E_in before \"E.remove((1, 2))\"\n_maint__m_E_in_remove((1, 2))\n# End maint _m_E_in before \"E.remove((1, 2))\"\n# Begin maint _m_E_out before \"E.remove((1, 2))\"\n_maint__m_E_out_remove((1, 2))\n# End maint _m_E_out before \"E.remove((1, 2))\"\nE.remove((1, 2))\n# Begin maint undemand_Comp1 after \"E.remove((1, 2))\"\nfor v18_a in Comp1_delta.elements():\n undemand_Comp1(v18_a)\nComp1_delta.clear()\n# End maint undemand_Comp1 after \"E.remove((1, 2))\"\nquery(2)" }, { "alpha_fraction": 0.26966291666030884, "alphanum_fraction": 0.26966291666030884, "avg_line_length": 39.45454406738281, "blob_id": "c3471c367e169241c3fd81a0bbe2222af8d0c235", "content_id": "46c2647fc954bd842c49e3ae264611d88e046d8a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 445, "license_type": "no_license", "max_line_length": 79, "num_lines": 11, "path": "/incoq/compiler/central/__init__.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "###############################################################################\n# incoq.central #\n# Author: Jon Brandvein #\n###############################################################################\n\n\"\"\"Gathering point for different components of the transformation.\"\"\"\n\n\n# Exports.\nfrom .manager import *\nfrom .transform import *\n" }, { "alpha_fraction": 0.5192592740058899, "alphanum_fraction": 0.5274074077606201, "avg_line_length": 27.125, "blob_id": "98052786a436c0ecd46769caecc50a3bd8abbbf0", "content_id": "fe82c41dd1a996f2d5d58764c49ed1bf186358ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1350, "license_type": "no_license", "max_line_length": 63, "num_lines": 48, "path": "/incoq/tests/invinc/central/test_options.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Unit tests for options.py.\"\"\"\n\n\nimport 
unittest\n\nimport incoq.compiler.incast as L\nfrom incoq.compiler.central.options import *\n\n\nclass DummyManager(OptionsManager):\n normal_defaults = {'a': 'b', 'c': 'd'}\n query_defaults = {'e': 'f', 'g': 'h'}\n\n\nclass OptionsCase(unittest.TestCase):\n \n def test_basic(self):\n query1 = L.pe('COMP({x for x in S}, [S], {\"e\": \"f2\"})')\n query2 = L.pe('COMP({y for y in T}, [T], {})')\n \n # Test import/export.\n nopts = {'a': 'b2'}\n qopts = {query1: {'e': 'f2'}}\n o = DummyManager()\n o.import_opts(nopts, qopts)\n \n # Test retrievals.\n self.assertEqual(o.get_opt('a'), 'b2')\n self.assertEqual(o.get_opt('c'), 'd')\n self.assertEqual(o.get_queryopt(query1, 'e'), 'f2')\n self.assertEqual(o.get_queryopt(query1, 'g'), 'h')\n self.assertEqual(o.get_queryopt(query2, 'e'), 'f')\n \n def test_bad_opts(self):\n nopts = {'x': 'y'}\n o = DummyManager()\n with self.assertRaises(L.ProgramError):\n o.validate_nopts(nopts)\n \n query = L.pe('COMP({x for x in S}, [S], {})')\n qopts = {query: {'x': 'y'}}\n o = DummyManager()\n with self.assertRaises(L.ProgramError):\n o.validate_qopts(qopts)\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.4461578130722046, "alphanum_fraction": 0.5214597582817078, "avg_line_length": 36.786407470703125, "blob_id": "12d1d7ccc33b29701971faa3c5848d67ffae02af", "content_id": "3ac00d213b38e3e700d47cb22d23e1a824d8dba0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3891, "license_type": "no_license", "max_line_length": 98, "num_lines": 103, "path": "/incoq/tests/programs/aggr/comp_out.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from incoq.runtime import *\n# Comp1 := {(x, z) : (x, y) in E, (y, z) in E}\n# Aggr1 := sum(setmatch(Comp1, 'bu', x), None)\n_m_E_in = Map()\ndef _maint__m_E_in_add(_e):\n (v9_1, v9_2) = _e\n if (v9_2 not in _m_E_in):\n _m_E_in[v9_2] = set()\n _m_E_in[v9_2].add(v9_1)\n\n_m_E_out = Map()\ndef _maint__m_E_out_add(_e):\n (v7_1, v7_2) = _e\n if (v7_1 not in _m_E_out):\n _m_E_out[v7_1] = set()\n _m_E_out[v7_1].add(v7_2)\n\n_m_Aggr1_out = Map()\ndef _maint__m_Aggr1_out_add(_e):\n (v5_1, v5_2) = _e\n if (v5_1 not in _m_Aggr1_out):\n _m_Aggr1_out[v5_1] = set()\n _m_Aggr1_out[v5_1].add(v5_2)\n\ndef _maint__m_Aggr1_out_remove(_e):\n (v6_1, v6_2) = _e\n _m_Aggr1_out[v6_1].remove(v6_2)\n if (len(_m_Aggr1_out[v6_1]) == 0):\n del _m_Aggr1_out[v6_1]\n\ndef _maint_Aggr1_add(_e):\n (v3_v1, v3_v2) = _e\n v3_val = _m_Aggr1_out.singlelookup(v3_v1, (0, 0))\n (v3_state, v3_count) = v3_val\n v3_state = (v3_state + v3_v2)\n v3_val = (v3_state, (v3_count + 1))\n v3_1 = v3_v1\n if (not (len((_m_Aggr1_out[v3_v1] if (v3_v1 in _m_Aggr1_out) else set())) == 0)):\n v3_elem = _m_Aggr1_out.singlelookup(v3_v1)\n # Begin maint _m_Aggr1_out before \"Aggr1.remove((v3_1, v3_elem))\"\n _maint__m_Aggr1_out_remove((v3_1, v3_elem))\n # End maint _m_Aggr1_out before \"Aggr1.remove((v3_1, v3_elem))\"\n # Begin maint _m_Aggr1_out after \"Aggr1.add((v3_1, v3_val))\"\n _maint__m_Aggr1_out_add((v3_1, v3_val))\n # End maint _m_Aggr1_out after \"Aggr1.add((v3_1, v3_val))\"\n\ndef _maint_Aggr1_remove(_e):\n (v4_v1, v4_v2) = _e\n v4_val = _m_Aggr1_out.singlelookup(v4_v1)\n if (v4_val[1] == 1):\n v4_1 = v4_v1\n v4_elem = _m_Aggr1_out.singlelookup(v4_v1)\n # Begin maint _m_Aggr1_out before \"Aggr1.remove((v4_1, v4_elem))\"\n _maint__m_Aggr1_out_remove((v4_1, v4_elem))\n # End maint _m_Aggr1_out before \"Aggr1.remove((v4_1, v4_elem))\"\n else:\n (v4_state, v4_count) = v4_val\n v4_state = 
(v4_state - v4_v2)\n v4_val = (v4_state, (v4_count - 1))\n v4_1 = v4_v1\n v4_elem = _m_Aggr1_out.singlelookup(v4_v1)\n # Begin maint _m_Aggr1_out before \"Aggr1.remove((v4_1, v4_elem))\"\n _maint__m_Aggr1_out_remove((v4_1, v4_elem))\n # End maint _m_Aggr1_out before \"Aggr1.remove((v4_1, v4_elem))\"\n # Begin maint _m_Aggr1_out after \"Aggr1.add((v4_1, v4_val))\"\n _maint__m_Aggr1_out_add((v4_1, v4_val))\n # End maint _m_Aggr1_out after \"Aggr1.add((v4_1, v4_val))\"\n\nComp1 = RCSet()\ndef _maint_Comp1_E_add(_e):\n v1_DAS = set()\n # Iterate {(v1_x, v1_y, v1_z) : (v1_x, v1_y) in deltamatch(E, 'bb', _e, 1), (v1_y, v1_z) in E}\n (v1_x, v1_y) = _e\n for v1_z in (_m_E_out[v1_y] if (v1_y in _m_E_out) else set()):\n if ((v1_x, v1_y, v1_z) not in v1_DAS):\n v1_DAS.add((v1_x, v1_y, v1_z))\n # Iterate {(v1_x, v1_y, v1_z) : (v1_x, v1_y) in E, (v1_y, v1_z) in deltamatch(E, 'bb', _e, 1)}\n (v1_y, v1_z) = _e\n for v1_x in (_m_E_in[v1_y] if (v1_y in _m_E_in) else set()):\n if ((v1_x, v1_y, v1_z) not in v1_DAS):\n v1_DAS.add((v1_x, v1_y, v1_z))\n for (v1_x, v1_y, v1_z) in v1_DAS:\n if ((v1_x, v1_z) not in Comp1):\n Comp1.add((v1_x, v1_z))\n # Begin maint Aggr1 after \"Comp1.add((v1_x, v1_z))\"\n _maint_Aggr1_add((v1_x, v1_z))\n # End maint Aggr1 after \"Comp1.add((v1_x, v1_z))\"\n else:\n Comp1.incref((v1_x, v1_z))\n del v1_DAS\n\nfor e in [(1, 2), (2, 3), (2, 4), (3, 5)]:\n # Begin maint _m_E_in after \"E.add(e)\"\n _maint__m_E_in_add(e)\n # End maint _m_E_in after \"E.add(e)\"\n # Begin maint _m_E_out after \"E.add(e)\"\n _maint__m_E_out_add(e)\n # End maint _m_E_out after \"E.add(e)\"\n # Begin maint Comp1 after \"E.add(e)\"\n _maint_Comp1_E_add(e)\n # End maint Comp1 after \"E.add(e)\"\nx = 1\nprint(_m_Aggr1_out.singlelookup(x, (0, 0))[0])" }, { "alpha_fraction": 0.4340053200721741, "alphanum_fraction": 0.44313427805900574, "avg_line_length": 28.52808952331543, "blob_id": "b07e0ea7262ad72e28979b702f5f87dd55b11360", "content_id": "3948cc6b869100db49f01ab01cb5d3caea52a947", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2629, "license_type": "no_license", "max_line_length": 66, "num_lines": 89, "path": "/incoq/tests/invinc/incast/test_macros.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Unit tests for macros.py.\"\"\"\n\n\nimport unittest\n\nfrom incoq.compiler.incast.nodes import *\nfrom incoq.compiler.incast.structconv import parse_structast\nfrom incoq.compiler.incast.macros import *\n\n\nclass MacroCase(unittest.TestCase):\n \n def p(self, source, mode=None, subst=None):\n tree = parse_structast(source, mode=mode, subst=subst)\n return IncMacroProcessor.run(tree)\n \n def pe(self, source):\n return self.p(source, mode='expr')\n \n def test_IncMacro(self):\n tree = self.p('S.nsadd(x)')\n update_node = SetUpdate(self.pe('S'), 'add', self.pe('x'))\n exp_tree = self.p('''\n if x not in S:\n UPDATE\n ''', subst={'<c>UPDATE': update_node})\n self.assertEqual(tree, exp_tree)\n \n def test_setmap(self):\n tree = self.p('S.smassignkey(\"bbu\", k, v, \"_\")')\n exp_tree = self.p('''\n (_1, _2) = k\n S.add((_1, _2, v))\n ''')\n self.assertEqual(tree, exp_tree)\n \n tree = self.p('S.smdelkey(\"bbu\", k, \"_\")')\n exp_tree = self.p('''\n (_1, _2) = k\n _elem = S.smlookup(\"bbu\", k)\n S.remove((_1, _2, _elem))\n ''')\n self.assertEqual(tree, exp_tree)\n \n tree = self.p('S.smnsassignkey(\"bbu\", k, v, \"_\")')\n exp_tree = self.p('''\n (_1, _2) = k\n if not setmatch(S, \"bbu\", k).isempty():\n _elem = S.smlookup(\"bbu\", 
k)\n S.remove((_1, _2, _elem))\n S.add((_1, _2, v))\n ''')\n self.assertEqual(tree, exp_tree)\n \n tree = self.p('S.smnsdelkey(\"bbu\", k, \"_\")')\n exp_tree = self.p('''\n if not setmatch(S, \"bbu\", k).isempty():\n (_1, _2) = k\n _elem = S.smlookup(\"bbu\", k)\n S.remove((_1, _2, _elem))\n ''')\n self.assertEqual(tree, exp_tree)\n \n tree = self.p('S.smreassignkey(\"bbu\", k, v, \"_\")')\n exp_tree = self.p('''\n (_1, _2) = k\n _elem = S.smlookup(\"bbu\", k)\n S.remove((_1, _2, _elem))\n S.add((_1, _2, v))\n ''')\n self.assertEqual(tree, exp_tree)\n \n def test_fields(self):\n tree = self.p('''\n o.nsassignfield(f, v)\n o.nsdelfield(g)\n ''')\n exp_tree = self.p('''\n if hasattr(o, 'f'):\n del o.f\n o.f = v\n if hasattr(o, 'g'):\n del o.g\n ''')\n self.assertEqual(tree, exp_tree)\n\n\nif __name__ == '__main__':\n unittest.main()\n\n" }, { "alpha_fraction": 0.41505154967308044, "alphanum_fraction": 0.476701021194458, "avg_line_length": 41.18260955810547, "blob_id": "99bbc9266e6d2353d93a70afbb823002d67503ee", "content_id": "b091bbbd920e622847b96088a58054a3f307ac58", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4850, "license_type": "no_license", "max_line_length": 165, "num_lines": 115, "path": "/incoq/tests/programs/objcomp/map_out.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from incoq.runtime import *\n# Comp1 := {(m, k, o_i) : (m, k, m_m_k_k) in _MAP, (m_m_k_k, o) in _M, (o, o_i) in _F_i}\n_m_Comp1_bbu = Map()\ndef _maint__m_Comp1_bbu_add(_e):\n (v11_1, v11_2, v11_3) = _e\n if ((v11_1, v11_2) not in _m_Comp1_bbu):\n _m_Comp1_bbu[(v11_1, v11_2)] = set()\n _m_Comp1_bbu[(v11_1, v11_2)].add(v11_3)\n\ndef _maint__m_Comp1_bbu_remove(_e):\n (v12_1, v12_2, v12_3) = _e\n _m_Comp1_bbu[(v12_1, v12_2)].remove(v12_3)\n if (len(_m_Comp1_bbu[(v12_1, v12_2)]) == 0):\n del _m_Comp1_bbu[(v12_1, v12_2)]\n\n_m__M_in = Map()\ndef _maint__m__M_in_add(_e):\n (v9_1, v9_2) = _e\n if (v9_2 not in _m__M_in):\n _m__M_in[v9_2] = set()\n _m__M_in[v9_2].add(v9_1)\n\n_m__MAP_uub = Map()\ndef _maint__m__MAP_uub_add(_e):\n (v7_1, v7_2, v7_3) = _e\n if (v7_3 not in _m__MAP_uub):\n _m__MAP_uub[v7_3] = set()\n _m__MAP_uub[v7_3].add((v7_1, v7_2))\n\nComp1 = RCSet()\ndef _maint_Comp1__MAP_add(_e):\n # Iterate {(v1_m, v1_k, v1_m_m_k_k, v1_o, v1_o_i) : (v1_m, v1_k, v1_m_m_k_k) in deltamatch(_MAP, 'bbb', _e, 1), (v1_m_m_k_k, v1_o) in _M, (v1_o, v1_o_i) in _F_i}\n (v1_m, v1_k, v1_m_m_k_k) = _e\n if isinstance(v1_m_m_k_k, Set):\n for v1_o in v1_m_m_k_k:\n if hasattr(v1_o, 'i'):\n v1_o_i = v1_o.i\n if ((v1_m, v1_k, v1_o_i) not in Comp1):\n Comp1.add((v1_m, v1_k, v1_o_i))\n # Begin maint _m_Comp1_bbu after \"Comp1.add((v1_m, v1_k, v1_o_i))\"\n _maint__m_Comp1_bbu_add((v1_m, v1_k, v1_o_i))\n # End maint _m_Comp1_bbu after \"Comp1.add((v1_m, v1_k, v1_o_i))\"\n else:\n Comp1.incref((v1_m, v1_k, v1_o_i))\n\ndef _maint_Comp1__M_add(_e):\n # Iterate {(v3_m, v3_k, v3_m_m_k_k, v3_o, v3_o_i) : (v3_m, v3_k, v3_m_m_k_k) in _MAP, (v3_m_m_k_k, v3_o) in deltamatch(_M, 'bb', _e, 1), (v3_o, v3_o_i) in _F_i}\n (v3_m_m_k_k, v3_o) = _e\n if hasattr(v3_o, 'i'):\n v3_o_i = v3_o.i\n for (v3_m, v3_k) in (_m__MAP_uub[v3_m_m_k_k] if (v3_m_m_k_k in _m__MAP_uub) else set()):\n if ((v3_m, v3_k, v3_o_i) not in Comp1):\n Comp1.add((v3_m, v3_k, v3_o_i))\n # Begin maint _m_Comp1_bbu after \"Comp1.add((v3_m, v3_k, v3_o_i))\"\n _maint__m_Comp1_bbu_add((v3_m, v3_k, v3_o_i))\n # End maint _m_Comp1_bbu after \"Comp1.add((v3_m, v3_k, v3_o_i))\"\n else:\n Comp1.incref((v3_m, v3_k, 
v3_o_i))\n\ndef _maint_Comp1__F_i_add(_e):\n # Iterate {(v5_m, v5_k, v5_m_m_k_k, v5_o, v5_o_i) : (v5_m, v5_k, v5_m_m_k_k) in _MAP, (v5_m_m_k_k, v5_o) in _M, (v5_o, v5_o_i) in deltamatch(_F_i, 'bb', _e, 1)}\n (v5_o, v5_o_i) = _e\n for v5_m_m_k_k in (_m__M_in[v5_o] if (v5_o in _m__M_in) else set()):\n for (v5_m, v5_k) in (_m__MAP_uub[v5_m_m_k_k] if (v5_m_m_k_k in _m__MAP_uub) else set()):\n if ((v5_m, v5_k, v5_o_i) not in Comp1):\n Comp1.add((v5_m, v5_k, v5_o_i))\n # Begin maint _m_Comp1_bbu after \"Comp1.add((v5_m, v5_k, v5_o_i))\"\n _maint__m_Comp1_bbu_add((v5_m, v5_k, v5_o_i))\n # End maint _m_Comp1_bbu after \"Comp1.add((v5_m, v5_k, v5_o_i))\"\n else:\n Comp1.incref((v5_m, v5_k, v5_o_i))\n\nm = Map()\ns1 = Set()\ns2 = Set()\nm['a'] = s1\n# Begin maint _m__MAP_uub after \"_MAP.add((m, 'a', s1))\"\n_maint__m__MAP_uub_add((m, 'a', s1))\n# End maint _m__MAP_uub after \"_MAP.add((m, 'a', s1))\"\n# Begin maint Comp1 after \"_MAP.add((m, 'a', s1))\"\n_maint_Comp1__MAP_add((m, 'a', s1))\n# End maint Comp1 after \"_MAP.add((m, 'a', s1))\"\nm['b'] = s2\n# Begin maint _m__MAP_uub after \"_MAP.add((m, 'b', s2))\"\n_maint__m__MAP_uub_add((m, 'b', s2))\n# End maint _m__MAP_uub after \"_MAP.add((m, 'b', s2))\"\n# Begin maint Comp1 after \"_MAP.add((m, 'b', s2))\"\n_maint_Comp1__MAP_add((m, 'b', s2))\n# End maint Comp1 after \"_MAP.add((m, 'b', s2))\"\nfor i in range(10):\n o = Obj()\n o.i = i\n # Begin maint Comp1 after \"_F_i.add((o, i))\"\n _maint_Comp1__F_i_add((o, i))\n # End maint Comp1 after \"_F_i.add((o, i))\"\n if (i % 2):\n s1.add(o)\n # Begin maint _m__M_in after \"_M.add((s1, o))\"\n _maint__m__M_in_add((s1, o))\n # End maint _m__M_in after \"_M.add((s1, o))\"\n # Begin maint Comp1 after \"_M.add((s1, o))\"\n _maint_Comp1__M_add((s1, o))\n # End maint Comp1 after \"_M.add((s1, o))\"\n else:\n s2.add(o)\n # Begin maint _m__M_in after \"_M.add((s2, o))\"\n _maint__m__M_in_add((s2, o))\n # End maint _m__M_in after \"_M.add((s2, o))\"\n # Begin maint Comp1 after \"_M.add((s2, o))\"\n _maint_Comp1__M_add((s2, o))\n # End maint Comp1 after \"_M.add((s2, o))\"\nk = 'a'\nprint(sorted((_m_Comp1_bbu[(m, k)] if ((m, k) in _m_Comp1_bbu) else set())))\nk = 'b'\nprint(sorted((_m_Comp1_bbu[(m, k)] if ((m, k) in _m_Comp1_bbu) else set())))" }, { "alpha_fraction": 0.4121306240558624, "alphanum_fraction": 0.4743390381336212, "avg_line_length": 22.814815521240234, "blob_id": "e183ebf58bf3b1f1be6e4a995ceae84161a3c4d0", "content_id": "2b5981e83e1e77846c4e9a3f1a5d2ab110f03f12", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 643, "license_type": "no_license", "max_line_length": 68, "num_lines": 27, "path": "/incoq/tests/util/test_topsort.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Unit tests for topsort.py.\"\"\"\n\n\nimport unittest\n\nfrom incoq.util.topsort import *\n\n\nclass TopsortCase(unittest.TestCase):\n \n def test_topsort(self):\n V = [1, 2, 3, 4, 5]\n E = [(1, 2), (1, 3), (2, 3), (3, 4), (4, 5), (3, 5), (1, 5)]\n order = topsort(V, E)\n exp_order = [1, 2, 3, 4, 5]\n self.assertEqual(order, exp_order)\n \n V = [1, 2, 3, 4, 5]\n E = [(1, 2), (2, 3), (3, 1), (4, 5)]\n order = topsort(V, E)\n self.assertEqual(order, None)\n cycle = get_cycle(V, E)\n self.assertCountEqual(cycle, {1, 2, 3})\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.5930371880531311, "alphanum_fraction": 0.5939298868179321, "avg_line_length": 32.38848876953125, "blob_id": "6c6eb7e6d1ee91cd78e43847010eb880a0fb92ba", 
"content_id": "8e617016b85a2b8feb114422994a626f26bed557", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 32487, "license_type": "no_license", "max_line_length": 79, "num_lines": 973, "path": "/incoq/compiler/incast/util.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Miscellaneous utilities, mostly used outside this subpackage.\"\"\"\n\n\n__all__ = [\n 'VarsFinder',\n 'VarRenamer',\n 'ScopeVisitor',\n 'prefix_names',\n 'NameGenerator',\n 'is_injective',\n 'QueryReplacer',\n 'QueryMapper',\n 'StmtTransformer',\n 'OuterMaintTransformer',\n 'rewrite_compclauses',\n 'maint_skeleton',\n 'FuncDefLister',\n 'elim_deadfuncs',\n 'N',\n 'DemfuncMaker',\n]\n\n\nfrom itertools import chain\nfrom contextlib import contextmanager\nfrom collections.abc import Callable\nfrom simplestruct.type import checktype, checktype_seq\nimport iast\n\nfrom incoq.util.collections import OrderedSet\nfrom .helpers import is_vartuple, get_vartuple, get_plainfuncdef, plainfuncdef\nfrom .nodes import *\nfrom .structconv import NodeVisitor, NodeTransformer, Templater\n\n\nclass VarsFinder(NodeVisitor):\n \n \"\"\"Simple finder of variables (Name nodes).\n \n Flags:\n \n ignore_store:\n Name nodes with Store context are ignored, as are update\n operations (e.g. SetUpdate). As an exception, Name nodes\n on the LHS of Enumerators are not ignored. This is to\n ensure safety under pattern matching semantics.\n ignore_functions:\n Name nodes that appear to be functions are ignored.\n ignore_rels:\n Names that appear to be relations are ignored.\n \n The builtins None, True, and False are always excluded, as they\n are NameConstants, not variables.\n \"\"\"\n \n def __init__(self, *,\n ignore_store=False,\n ignore_functions=False,\n ignore_rels=False):\n super().__init__()\n self.ignore_store = ignore_store\n self.ignore_functions = ignore_functions\n self.ignore_rels = ignore_rels\n \n def process(self, tree):\n self.usedvars = OrderedSet()\n super().process(tree)\n return self.usedvars\n \n def visit_Name(self, node):\n self.generic_visit(node)\n if not (self.ignore_store and isinstance(node.ctx, Store)):\n self.usedvars.add(node.id)\n \n def visit_Call(self, node):\n class IGNORE(iast.AST):\n _meta = True\n \n if isinstance(node.func, Name) and self.ignore_functions:\n self.generic_visit(node._replace(func=IGNORE()))\n \n else:\n self.generic_visit(node)\n \n def visit_Enumerator(self, node):\n if is_vartuple(node.target):\n # Bypass ignore_store.\n vars = get_vartuple(node.target)\n self.usedvars.update(vars)\n else:\n self.visit(node.target)\n \n if not (self.ignore_rels and isinstance(node.iter, Name)):\n self.visit(node.iter)\n \n def visit_Comp(self, node):\n self.visit(node.resexp)\n # Hack to ensure we don't grab rels on RHS of\n # membership conditions.\n for i in node.clauses:\n if (self.ignore_rels and\n isinstance(i, Compare) and\n len(i.ops) == len(i.comparators) == 1 and\n isinstance(i.ops[0], In) and\n isinstance(i.comparators[0], Name)):\n self.visit(i.left)\n else:\n self.visit(i)\n \n def visit_Aggregate(self, node):\n if isinstance(node.value, Name) and self.ignore_rels:\n return\n else:\n self.generic_visit(node)\n \n def visit_SetMatch(self, node):\n if isinstance(node.target, Name) and self.ignore_rels:\n self.visit(node.key)\n else:\n self.generic_visit(node)\n \n def update_helper(self, node):\n IGNORE = object()\n \n if isinstance(node.target, Name) and self.ignore_store:\n 
self.generic_visit(node._replace(target=IGNORE))\n \n else:\n self.generic_visit(node)\n \n visit_SetUpdate = update_helper\n visit_RCSetRefUpdate = update_helper\n visit_AssignKey = update_helper\n visit_DelKey = update_helper\n\n\nclass VarRenamer(NodeTransformer):\n \n \"\"\"Rename occurrences of variables according to a substitution\n mapping. The mapping values can either be strings, or functions\n that map from old variable name to new name. None indicates\n no change.\n \"\"\"\n \n def __init__(self, subst):\n super().__init__()\n self.subst = subst\n \n def visit_Name(self, node):\n val = self.subst.get(node.id, None)\n if isinstance(val, Callable):\n val = val(node.id)\n \n if val is None:\n return node\n elif isinstance(val, str):\n return node._replace(id=val)\n else:\n raise ValueError(\"Bad substitution for '{}': {}\".format(\n node.id, repr(val)))\n\n\nclass ScopeVisitor(NodeVisitor):\n \n \"\"\"Visitor that tracks current scope information during processing.\n The current scope is a stack of sets of names. Each stack entry\n corresponds to a lexical scope, and the sets contain names bound in\n that scope.\n \n Lexical scopes are created for the top-level program, function\n definitions, and comprehensions. Identifiers are considered bound\n if a Name node appears with Store context, or if it is a formal\n parameter to a FunctionDef. Binding is not flow-sensitive within\n a scope, and deletion (Del context) does not remove bindings.\n \n These rules are similar to Python's own scoping rules.\n \"\"\"\n \n @staticmethod\n def bvars_from_scopestack(scope_stack):\n \"\"\"Return an OrderedSet of all variables bound by some\n entry in a scope stack.\n \"\"\"\n return OrderedSet(chain(*scope_stack))\n \n def current_bvars(self):\n \"\"\"Return an OrderedSet of the variables bound at this\n point in the traversal.\n \"\"\"\n return self.bvars_from_scopestack(self._scope_stack)\n \n def process(self, tree):\n self._scope_stack = []\n super().process(tree)\n assert len(self._scope_stack) == 0\n \n def enter(self):\n self._scope_stack.append(OrderedSet())\n \n def exit(self):\n self._scope_stack.pop()\n \n def bind(self, name):\n self._scope_stack[-1].add(name)\n \n def visit_Module(self, node):\n self.enter()\n self.generic_visit(node)\n self.exit()\n \n def visit_FunctionDef(self, node):\n # Bind the name of the function in the outer scope,\n # its parameters in the inner scope.\n # Fancy FunctionDef features (decorators, annotations,\n # variable/non-positional arguments) aren't accounted for.\n self.bind(node.name)\n self.enter()\n for a in node.args.args:\n self.bind(a.arg)\n self.generic_visit(node)\n self.exit()\n \n def visit_Comp(self, node):\n self.enter()\n self.generic_visit(node)\n self.exit()\n \n def visit_Name(self, node):\n if isinstance(node.ctx, Store):\n self.bind(node.id)\n \n # For nodes that introduce bindings, process the RHS\n # before the LHS.\n \n def visit_Enumerator(self, node):\n self.visit(node.iter)\n self.visit(node.target)\n \n def visit_Assign(self, node):\n self.visit(node.value)\n self.visit(node.targets)\n \n def visit_AugAssign(self, node):\n self.visit(node.value)\n self.visit(node.op)\n self.visit(node.target)\n \n def visit_For(self, node):\n self.visit(node.iter)\n self.visit(node.target)\n self.visit(node.body)\n self.visit(node.orelse)\n\n\ndef prefix_names(tree, names, prefix):\n \"\"\"Given a tree and a sequence of names, produce a new tree\n where each occurrence of those names is prefixed with the given\n string.\n \"\"\"\n 
checktype_seq(names, str)\n checktype(prefix, str)\n subst = {n: prefix + n for n in names}\n return Templater.run(tree, subst)\n\nclass NameGenerator:\n \n \"\"\"Generates a sequence of distinct strings, e.g. for fresh\n identifiers or prefixes of fresh identifiers.\n \"\"\"\n \n def __init__(self, fmt='v{}', counter=1):\n self.fmt = fmt\n \"\"\"String format, including '{}' where the counter goes.\"\"\"\n self.counter = counter\n \"\"\"Counter.\"\"\"\n \n def peek(self):\n \"\"\"Return the next string without incrementing the counter.\"\"\"\n return self.fmt.format(str(self.counter))\n \n def next(self):\n \"\"\"Return the next string and increment the counter.\"\"\"\n name = self.peek()\n self.counter += 1\n return name\n \n def next_prefix(self):\n \"\"\"Return the next prefix and increment the counter.\"\"\"\n return self.next() + '_'\n \n def __iter__(self):\n return self\n \n def __next__(self):\n return self.next()\n \n def prefix_names(self, code, names):\n \"\"\"Use prefix_names() with a prefix based on this generator.\"\"\"\n prefix = self.next_prefix()\n return prefix_names(code, names, prefix)\n\n\ndef is_injective(tree):\n \"\"\"Return True if the expression can be guaranteed to be injective,\n i.e., to evaluate to distinct results for distinct values of its\n variables.\n \"\"\"\n # Currently we just consider names and tuples to be injective.\n class InjTester(NodeVisitor):\n \n def process(self, tree):\n self.answer = True\n super().process(tree)\n return self.answer\n \n def generic_visit(self, node):\n if not isinstance(node, (Name, Tuple, Load, Store, Del)):\n self.answer = False\n super().generic_visit(node)\n \n return InjTester.run(tree)\n\n\nclass QueryReplacer(NodeTransformer):\n \n \"\"\"Replace each occurrence of one query with another.\"\"\"\n \n def __init__(self, from_query, to_query):\n super().__init__()\n self.from_query = from_query\n self.to_query = to_query\n \n def visit_Comp(self, node):\n if node == self.from_query:\n return self.to_query\n else:\n return self.generic_visit(node)\n \n def visit_Aggregate(self, node):\n if node == self.from_query:\n return self.to_query\n else:\n return self.generic_visit(node)\n\nclass QueryMapper(NodeTransformer):\n \n \"\"\"For each unique query, replace all occurrences of that query\n with the result of the methods map_Comp() or map_Aggregate(),\n provided by the subclass. Innermost queries are handled first.\n The map_* methods are called only once per unique query.\n \n If ignore_invalid is True, do not process queries having an\n 'invalid' option key set to True. 
Subqueries of these queries\n will still be recursively processed.\n \"\"\"\n \n ignore_invalid = False\n \n def __init__(self):\n super().__init__()\n # Replacement mapping.\n self.comps = {}\n self.aggrs = {}\n \n def helper(self, node, handler_name, replmap):\n node = self.generic_visit(node)\n \n assert node.options is not None\n invalid = node.options.get('_invalid', False)\n if invalid and self.ignore_invalid:\n return node\n \n handler = getattr(self, handler_name, None)\n if handler is None:\n return node\n \n repl = replmap.get(node, None)\n if repl is None:\n repl = handler(node)\n replmap[node] = repl\n \n return repl\n \n def visit_Comp(self, node):\n return self.helper(node, 'map_Comp', self.comps)\n \n def visit_Aggregate(self, node):\n return self.helper(node, 'map_Aggregate', self.aggrs)\n\n\nclass StmtTransformer(NodeTransformer):\n \n \"\"\"Transformer for inserting code immediately before or after\n the statement containing the current node.\n \n We keep a stack of lists of statements to insert before and\n after the current statement node. (A stack is needed because\n statements may be nested in the case of function and class\n definitions.) The subclass may access the current tops of the\n stacks using the pre_stmts and post_stmts properties. When a\n statement node is done being processed, if either of these lists\n is non-empty, their contents are added around the (result of\n processing the) node. \n \"\"\"\n \n def process(self, tree):\n # One entry for each stmt-typed node we are inside of.\n self.pre_stmt_stack = []\n self.post_stmt_stack = []\n result = super().process(tree)\n assert len(self.pre_stmt_stack) == len(self.post_stmt_stack) == 0\n return result\n \n @property\n def pre_stmts(self):\n return self.pre_stmt_stack[-1]\n \n @property\n def post_stmts(self):\n return self.post_stmt_stack[-1]\n \n def node_visit(self, node):\n if isinstance(node, stmt):\n self.pre_stmt_stack.append([])\n self.post_stmt_stack.append([])\n \n result = super().node_visit(node)\n \n if isinstance(node, stmt):\n pre_stmts = self.pre_stmt_stack.pop()\n post_stmts = self.post_stmt_stack.pop()\n \n if len(pre_stmts) == len(post_stmts) == 0:\n # If there's nothing to insert, don't muck with the\n # result, which could cause unnecessary copying.\n return result\n else:\n if result is None:\n result = (node,)\n elif isinstance(result, AST):\n result = (result,)\n result = (tuple(pre_stmts) + tuple(result) +\n tuple(post_stmts))\n \n return result\n\n\nclass OuterMaintTransformer(NodeTransformer):\n \n \"\"\"Transformer for inserting new maintenance code outside all\n existing maintenance code for a given set of invariants. That is,\n if the code already contains maintenance\n \n I1's precode for change to R\n I2's precode for change to R\n update R\n I2's postcode for change to R\n I1's postcode for change to R\n \n this visitor can be used to place new maintenance for R before or\n between the precode for I1 and I2 -- and likewise after or between\n the postcode -- instead of immediately before or after the update.\n \n Precisely: Let S be a set of invariants. 
We define the \"active\" node\n    A_S(N) for a node N to be the unique innermost node such that\n    \n    1) A_S(N) is a (non-strict) ancestor of N\n    2) A_S(N) is not descended from the \"update\" field of a\n       Maintenance node for any invariant in S, unless it is more\n       recently descended from a \"precode\"/\"postcode\" field of\n       another Maintenance node\n    \n    Note that A_S(N) is always either N itself, or else a Maintenance\n    node for an invariant in S and from whose \"update\" field N is\n    descended.\n    \n    Maintenance in response to update N gets inserted immediately before\n    or after A_S(N). This ensures that all inserted code for the update\n    sees a view of the invariants in S that is consistent with the\n    state of the updated variable. Conversely, the already existing code\n    for invariants in S sees the new invariant as consistent with this\n    variable's new value beforehand or old value afterwards.\n    \"\"\"\n    \n    def __init__(self, invs):\n        super().__init__()\n        if invs is not None:\n            invs = set(invs)\n        self.invs = invs\n        \"\"\"Set of invariants whose maintenance new code should be\n        inserted outside of. If None, insert outside of all\n        invariant maintenance.\n        \"\"\"\n    \n    # As we descend into Maintenance nodes, keep a stack to\n    # remember 1) whether there is an active node, and 2) details\n    # about currently pending code to be inserted.\n    \n    def process(self, tree):\n        # One entry for every Maintenance node we're in, plus\n        # one for the beginning. Tip indicates whether we are\n        # currently strictly inside an active (maintenance) node.\n        self.active_stack = [False]\n        # One entry for every Maintenance node whose update\n        # node we're in, plus one for the beginning.\n        self.maintinfo_stack = [None]\n        return super().process(tree)\n    \n    @property\n    def inside_active(self):\n        \"\"\"True if we're strictly inside an active (maintenance) node.\n        (False when we're at the active node itself.)\n        \"\"\"\n        return self.active_stack[-1]\n    \n    def with_outer_maint(self, node, name, desc, precode, postcode):\n        \"\"\"In the handler for an update, call this method and return\n        its value in place of \"node\" in order to wrap maintenance info\n        around the update or its active maintenance node.\n        \"\"\"\n        # We can only handle keeping track of one new piece of\n        # maintenance at a time. Multiple pieces shouldn't really\n        # happen, because the \"update\" field of Maintenance nodes\n        # should only contain other Maintenance nodes and a single\n        # update statement. 
(It's ok to be in the \"update\" portion\n # of an outer Maintenance node if we're also inside precode\n # or postcode of an inner Maintenance node, because then\n # the outer one isn't the active node.)\n if self.inside_active:\n assert self.maintinfo_stack[-1] is None\n self.maintinfo_stack[-1] = (name, desc, precode, postcode)\n return node\n else:\n return Maintenance(name, desc, precode, (node,), postcode)\n \n def visit_Maintenance(self, node):\n @contextmanager\n def active(b):\n self.active_stack.append(b)\n yield\n self.active_stack.pop()\n \n # We can only be the active node while we are visiting\n # update code, not pre and post code.\n \n with active(False):\n precode = self.visit(node.precode)\n \n # This node is the active one if no higher node is active,\n # and if this node maintains for one of the given invariants.\n we_are_active = (not self.inside_active and\n (self.invs is None or\n node.name in self.invs))\n # The update section is inside an active node if\n # we are inside one, or if we are it.\n inside_is_in_active = self.inside_active or we_are_active\n \n if we_are_active:\n # Create a stack entry for new code to insert\n # before/after this one.\n self.maintinfo_stack.append(None)\n with active(inside_is_in_active):\n update = self.visit(node.update)\n # Pop and save locally, so as not to interfere with\n # postcode processing below.\n saved_maintinfo = self.maintinfo_stack.pop()\n else:\n with active(inside_is_in_active):\n update = self.visit(node.update)\n \n with active(False):\n postcode = self.visit(node.postcode)\n \n # Do normal NodeTransformer replacements first,\n # before worrying about inserting outer maintenance.\n node = node._replace(precode=precode,\n update=update,\n postcode=postcode)\n \n # If we are the active maintenance node and new maintenance\n # info was added, create and return the new Maintenance node.\n if we_are_active and saved_maintinfo is not None:\n (name, desc, precode, postcode) = saved_maintinfo\n return Maintenance(name, desc, precode, (node,), postcode)\n else:\n return node\n\n\ndef rewrite_compclauses(comp, rewriter, *,\n after=False, enum_only=False, recursive=False,\n resexp=True):\n \"\"\"Apply a rewriter to each part of the comprehension, sometimes\n inserting new clauses to the left or right of the rewritten part.\n \n rewriter is applied to each clause in left-to-right order, and\n finally to the result expression. It returns a pair of the new\n clause/expression AST, and a list of clauses to be inserted\n immediately before or after this AST (depending on the after\n flag).\n \n If clauses are generated for the result expression, they are\n always appended to the end of the clause list, regardless of\n the after flag.\n \n If enum_only is True, only process enumerator clauses, not\n condition clauses or the result expression. 
If enum_only is False\n but resexp is True, process all clauses but not the result\n expression.\n \n If recursive is True, also call rewriter on the newly inserted\n clauses as well.\n \"\"\"\n checktype(comp, Comp)\n \n # Use a recursive function to handle the case of recursively\n # processing added clauses.\n \n def process(clauses):\n result = []\n for cl in clauses:\n if enum_only and not isinstance(cl, Enumerator):\n result.append(cl)\n continue\n \n mod_clause, add_clauses = rewriter(cl)\n \n if recursive:\n add_clauses = process(add_clauses)\n \n if after:\n result.append(mod_clause)\n result.extend(add_clauses)\n else:\n result.extend(add_clauses)\n result.append(mod_clause)\n \n return result\n \n new_clauses = process(comp.clauses)\n \n # Handle result expression.\n if enum_only or not resexp:\n new_resexp = comp.resexp\n else:\n new_resexp, add_clauses = rewriter(comp.resexp)\n if recursive:\n add_clauses = process(add_clauses)\n new_clauses.extend(add_clauses)\n \n return comp._replace(resexp=new_resexp, clauses=tuple(new_clauses))\n\n\n# TODO: The idiom used in the following code is a bit tricky.\n# We need bottom-up info about whether a node contains a Maintenance\n# node in any of its subtrees. So one visitor builds up a set of\n# nodes for which this predicate is True. The set is by node id\n# (memory address), not node value, for efficient comparisons.\n# But another visitor transforms the tree, which means modifying\n# this mapping to correctly include newly-introduced trees.\n# This could be generalized into iast. Alternatively, this is all\n# because the node-replacement transformer idiom makes it difficult\n# to store auxiliary information on nodes. Maybe we could do a pre\n# and post processing step that convert between a new node format\n# that also includes this auxiliary info as fields.\n\nclass MaintFinder(NodeVisitor):\n \n \"\"\"Return a set of ids (memory addresses) of nodes in a tree\n that are or contain Maintenance nodes.\n \"\"\"\n \n # Should be O(n) for the whole tree, whereas a naive approach\n # is O(n^2).\n \n def process(self, tree):\n self.result = set()\n super().process(tree)\n return self.result\n \n # Redefine some NodeVisitor internals to return bools indicating\n # the presence of a Maintenance node in a subtree.\n \n def visit(self, tree):\n if isinstance(tree, AST):\n return self.node_visit(tree)\n elif isinstance(tree, tuple):\n return self.seq_visit(tree)\n else:\n return False\n \n def node_visit(self, node):\n hasmaint = super().node_visit(node)\n if hasmaint:\n self.result.add(id(node))\n return hasmaint\n \n def seq_visit(self, seq):\n hasmaint = False\n for item in seq:\n hasmaint |= self.visit(item)\n return hasmaint\n \n def generic_visit(self, node):\n hasmaint = False\n for field in node._fields:\n value = getattr(node, field)\n hasmaint |= self.visit(value)\n return hasmaint\n \n def visit_Maintenance(self, node):\n self.generic_visit(node)\n return True\n\nclass ControlFlattener(NodeTransformer):\n \n \"\"\"Flatten For, If, and While statements by replacing these nodes\n with their bodies, so long as there is no else clause.\n \"\"\"\n \n def helper(self, node):\n # Recursing might not even be necessary since we call this\n # visitor bottom-up in Skeletonizer.\n node = self.generic_visit(node)\n if node.orelse == ():\n return node.body\n \n visit_For = helper\n visit_While = helper\n visit_If = helper\n\nclass Skeletonizer(NodeTransformer):\n \n \"\"\"Remove details of maintenance code, retaining just the outline.\n \n 
Specifically, any precode or postcode that is not another\n Maintenance node is removed. If precode or postcode would be\n made empty by this, Pass is inserted. Where possible, statements\n that introduce new blocks of code are eliminated, and any nested\n Maintenance nodes are pushed up in the tree.\n \"\"\"\n \n def process(self, tree):\n self.hasmaint_nodes = MaintFinder.run(tree)\n return super().process(tree)\n \n def node_visit(self, node):\n # Preserve hasmaint info when we substitute new nodes\n # into the tree.\n hasmaint = id(node) in self.hasmaint_nodes\n result = super().node_visit(node)\n if hasmaint:\n self.hasmaint_nodes.add(id(result))\n return result\n \n def filter(self, stmts):\n if len(stmts) == 0:\n return ()\n \n new_stmts = ControlFlattener.run(stmts)\n new_stmts = tuple(s for s in new_stmts if id(s) in self.hasmaint_nodes)\n if len(new_stmts) == 0:\n new_stmts = (Pass(),)\n \n return new_stmts\n \n def visit_Maintenance(self, node):\n node = self.generic_visit(node)\n \n precode = self.filter(node.precode)\n postcode = self.filter(node.postcode)\n \n return node._replace(precode=precode, postcode=postcode)\n\ndef maint_skeleton(tree):\n \"\"\"Return a modified tree that contains just the skeleton of\n maintenance code, omitting the exact code but keeping uses of\n nested maintenance code.\n \"\"\"\n return Skeletonizer.run(tree)\n\n\nclass FuncDefLister(NodeVisitor):\n \n \"\"\"Given a predicate, find all functions whose names match the\n predicate, returned as a map from name to definition node. There\n must be exactly one definition per function.\n \"\"\"\n \n def __init__(self, pred):\n super().__init__()\n self.pred = pred\n \n def process(self, tree):\n self.result = {}\n super().process(tree)\n return self.result\n \n def visit_FunctionDef(self, node):\n name = node.name\n if self.pred(name):\n if name in self.result:\n raise AssertionError('Multiple definitions for '\n 'function \\'{}\\''.format(name))\n self.result[name] = node\n\nclass FuncEliminator(NodeTransformer):\n \n \"\"\"Remove definitions of functions matching a predicate.\"\"\"\n \n def __init__(self, pred):\n super().__init__()\n self.pred = pred\n \n def visit_FunctionDef(self, node):\n if self.pred(node.name):\n return ()\n\ndef elim_deadfuncs(tree, pred):\n \"\"\"Remove definitions of functions matching the predicate, if they\n are not used.\n \"\"\"\n ### TODO: Should be run to fixed-point.\n used_names = VarsFinder.run(tree, ignore_functions=False)\n new_pred = lambda n: pred(n) and n not in used_names\n return FuncEliminator.run(tree, new_pred)\n\nclass N:\n \n \"\"\"Naming conventions.\"\"\"\n \n @classmethod\n def uset(cls, n):\n return '_U_' + n\n \n @classmethod\n def usetext(cls, n):\n return '_UEXT_' + n\n \n @classmethod\n def queryfunc(cls, n):\n return 'query_' + n\n \n @classmethod\n def demfunc(cls, n):\n return 'demand_' + n\n \n @classmethod\n def undemfunc(cls, n):\n return 'undemand_' + n\n \n @classmethod\n def deltaset(cls, n):\n return n + '_delta'\n\n\nclass DemfuncMaker:\n \n \"\"\"Generates code for demanding, undemanding, and query-demanding\n combinations of parameter values.\n \n There are two kinds of demand: extensional and intensional.\n Extensional demand is when something becomes demanded because the\n user says so. E.g. the user is about to perform a query, or the\n user thinks it is a good policy to precompute an answer for these\n parameter values. 
Intensional demand is when something becomes\n    demanded for an inner query because it may be needed by the outer\n    query. Intensional demand is controlled by invariants.\n    \n    We can think of intensional demand as being stored in a reference-\n    counted set, and extensional demand as being stored in an ordinary\n    set. The actual U-set is then their reference-counted union.\n    LRU cache information is only tracked for extensional items.\n    \"\"\"\n    \n    def __init__(self, name, specstr, demparams, lrulimit):\n        self.name = name\n        \"\"\"Query name, base name for functions.\"\"\"\n        self.specstr = specstr\n        \"\"\"Docstring description of query.\"\"\"\n        self.demparams = tuple(demparams)\n        \"\"\"Tuple of demand parameter names.\"\"\"\n        self.lrulimit = lrulimit\n        \"\"\"Number of entries in LRU cache; None if no LRU cache.\"\"\"\n        \n        from . import pe, ln, Str, Num, tuplify\n        demcall_node = pe('DEMFUNC(__ARGS)',\n                          subst={'DEMFUNC': ln(N.demfunc(self.name))})\n        demcall_node = demcall_node._replace(\n            args=tuple(ln(p) for p in self.demparams))\n        \n        topvars = ['_top_v' + str(i)\n                   for i in range(1, len(self.demparams) + 1)]\n        undemcall_node = pe('UNDEMFUNC(__ARGS)',\n                            subst={'UNDEMFUNC': ln(N.undemfunc(self.name))})\n        undemcall_node = undemcall_node._replace(\n            args=tuple(ln(p) for p in topvars))\n        \n        extset = 'Set' if self.lrulimit is None else 'LRUSet'\n        \n        self.subst = {'SPEC_STR': Str(self.specstr),\n                      'USET': N.uset(self.name),\n                      'USET_EXT': N.usetext(self.name),\n                      'EXTSET': ln(extset),\n                      'DEMPARAMS': tuplify(self.demparams),\n                      'DEMCALL': demcall_node,\n                      'UNDEMCALL': undemcall_node,\n                      'UNDEMFUNC': ln(N.undemfunc(self.name)),\n                      'S_TOPVARS': tuplify(topvars, lval=True)}\n        if self.lrulimit is not None:\n            self.subst['LRUSIZE'] = Num(lrulimit)\n    \n    def make_usetvars(self):\n        from . import pc\n        code = pc('''\n            USET = RCSet()\n            USET_EXT = EXTSET()\n            ''', subst=self.subst)\n        return code\n    \n    def make_demfunc(self):\n        from . import pc\n        code = pc('''\n            SPEC_STR\n            USET.rcadd(DEMPARAMS)\n            ''', subst=self.subst)\n        code = plainfuncdef(N.demfunc(self.name), self.demparams, code)\n        return code\n    \n    def make_undemfunc(self):\n        from . import pc\n        code = pc('''\n            SPEC_STR\n            USET.rcremove(DEMPARAMS)\n            ''', subst=self.subst)\n        code = plainfuncdef(N.undemfunc(self.name), self.demparams, code)\n        return code\n    \n    def make_queryfunc(self):\n        from . 
import pc\n \n if self.lrulimit is None:\n code = pc('''\n SPEC_STR\n if DEMPARAMS not in USET_EXT:\n USET_EXT.add(DEMPARAMS)\n DEMCALL\n return True\n ''', subst=self.subst)\n \n else:\n code = pc('''\n SPEC_STR\n if DEMPARAMS not in USET_EXT:\n while len(USET_EXT) >= LRUSIZE:\n S_TOPVARS = _top = USET_EXT.peek()\n UNDEMCALL\n USET_EXT.remove(_top)\n USET_EXT.add(DEMPARAMS)\n DEMCALL\n else:\n USET_EXT.ping(DEMPARAMS)\n return True\n ''', subst=self.subst)\n \n code = plainfuncdef(N.queryfunc(self.name), self.demparams, code)\n return code\n \n def make_alldem(self):\n code = (self.make_usetvars() +\n self.make_demfunc() +\n self.make_undemfunc() +\n self.make_queryfunc())\n return code\n" }, { "alpha_fraction": 0.5503944158554077, "alphanum_fraction": 0.5515629649162292, "avg_line_length": 26.384000778198242, "blob_id": "7e2fc8350378b7b3629d241ffc1c736926668681", "content_id": "285dbb4a3e3ab679fc8e4bbd390eedcaeb162795", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3423, "license_type": "no_license", "max_line_length": 71, "num_lines": 125, "path": "/incoq/transform/statsdb.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Persistent storage of transformation stats.\"\"\"\n\n\n__all__ = [\n 'StatsDB',\n 'BaseSession',\n 'Session',\n]\n\n\nimport os\nimport pickle\nimport code\n\n\ntry:\n from tabulate import tabulate\n HAVE_TABULATE = True\nexcept ImportError:\n HAVE_TABULATE = False\n\n\nclass StatsDB:\n \n \"\"\"A collection of transformation stats and an associated path\n to a persistent file. Stats are represented as a mapping from\n an entry name (the display name for that particular transformation)\n to a stats dictionary (as returned by the compiler).\n \"\"\"\n \n def __init__(self, path):\n self.path = path\n self.allstats = {}\n self.load()\n \n def load(self):\n \"\"\"Load stats from file if it exists.\"\"\"\n if os.path.exists(self.path):\n with open(self.path, 'rb') as db:\n self.allstats = pickle.load(db)\n \n def save(self):\n \"\"\"Save stats and csv.\"\"\"\n with open(self.path, 'wb') as db:\n pickle.dump(self.allstats, db)\n\n\nclass BaseSession:\n \n \"\"\"Encapsulates shell state and commands. Provides a namespace\n field ns to be used as the local namespace of the interactive\n shell. 
Commands are methods beginning with \"cmd_\"; this prefix\n is stripped in the session.\n \"\"\"\n \n class Namespace(dict):\n \n def __init__(self, session):\n self.session = session\n \n def __missing__(self, key):\n cmd = getattr(self.session, 'cmd_' + key, None)\n if cmd is not None:\n return cmd\n raise KeyError(key)\n \n def __init__(self, statsdb):\n self.ns = self.Namespace(self)\n self.ns['statsdb'] = statsdb\n self.ns['allstats'] = statsdb.allstats\n \n def interact(self):\n \"\"\"Enter an interactive session until the user types exit().\"\"\"\n banner = 'Stats editor ({})'.format(self.ns['statsdb'].path)\n try:\n code.interact(banner=banner, local=self.ns)\n except SystemExit:\n pass\n \n def cmd_reload(self):\n self.ns['statsdb'].load()\n print('Statsdb reloaded')\n \n def cmd_save(self):\n self.ns['statsdb'].save()\n print('Statsdb saved')\n \n# def cmd_viewstats(self):\n# self.ns['statsdb'].print()\n\n\nclass Session(BaseSession):\n \n def __init__(self, statsdb, name=None):\n super().__init__(statsdb)\n if name is not None:\n self.cmd_switch(name)\n \n def cmd_showentries(self):\n for k in sorted(self.ns['allstats'].keys()):\n print(k)\n \n def cmd_showstats(self):\n for k, v in sorted(self.ns['stats'].items()):\n print('{:<20} {}'.format(k + ':', v))\n \n def cmd_switch(self, name):\n self.ns['stats'] = self.ns['allstats'][name]\n \n def cmd_showcosts(self, name=None):\n if name is not None:\n stats = self.ns['allstats'][name]\n else:\n stats = self.ns['stats']\n rows = []\n from incoq.compiler.cost import PrettyPrinter\n for func, cost in sorted(stats['costs'].items()):\n coststr = 'O({})'.format(PrettyPrinter.run(cost))\n rows.append([func, coststr])\n \n if HAVE_TABULATE:\n print(tabulate(rows, tablefmt='grid'))\n else:\n print('\\n'.join('{:<20} {}'.format(f + ':', c)\n for f, c in rows))\n" }, { "alpha_fraction": 0.4188251793384552, "alphanum_fraction": 0.5184242129325867, "avg_line_length": 51.55518341064453, "blob_id": "473ea032805226603bf990dd1ec9e47dd2358ccf", "content_id": "806d770c645914e4ea097787eb8b9ceeeefdabe8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15713, "license_type": "no_license", "max_line_length": 261, "num_lines": 299, "path": "/incoq/tests/programs/deminc/tup/objnest_out.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from incoq.runtime import *\n# Comp1 := {(s, x) : s in _U_Comp1, (s, _tup1) in _M, (_tup1, _tup2, z) in _TUP2, (_tup2, x, y) in _TUP2, ((x + y) == z)}\n# Comp1_Ts := {s : s in _U_Comp1}\n# Comp1_d_M := {(s, _tup1) : s in Comp1_Ts, (s, _tup1) in _M}\n# Comp1_T_tup1 := {_tup1 : (s, _tup1) in Comp1_d_M}\n# Comp1_d_TUP21 := {(_tup1, _tup2, z) : _tup1 in Comp1_T_tup1, (_tup1, _tup2, z) in _TUP2}\n# Comp1_T_tup2 := {_tup2 : (_tup1, _tup2, z) in Comp1_d_TUP21}\n# Comp1_d_TUP22 := {(_tup2, x, y) : _tup2 in Comp1_T_tup2, (_tup2, x, y) in _TUP2}\n_m_Comp1_out = Map()\ndef _maint__m_Comp1_out_add(_e):\n (v29_1, v29_2) = _e\n if (v29_1 not in _m_Comp1_out):\n _m_Comp1_out[v29_1] = set()\n _m_Comp1_out[v29_1].add(v29_2)\n\ndef _maint__m_Comp1_out_remove(_e):\n (v30_1, v30_2) = _e\n _m_Comp1_out[v30_1].remove(v30_2)\n if (len(_m_Comp1_out[v30_1]) == 0):\n del _m_Comp1_out[v30_1]\n\n_m_Comp1_d_TUP21_ubu = Map()\ndef _maint__m_Comp1_d_TUP21_ubu_add(_e):\n (v27_1, v27_2, v27_3) = _e\n if (v27_2 not in _m_Comp1_d_TUP21_ubu):\n _m_Comp1_d_TUP21_ubu[v27_2] = set()\n _m_Comp1_d_TUP21_ubu[v27_2].add((v27_1, v27_3))\n\ndef _maint__m_Comp1_d_TUP21_ubu_remove(_e):\n (v28_1, 
v28_2, v28_3) = _e\n _m_Comp1_d_TUP21_ubu[v28_2].remove((v28_1, v28_3))\n if (len(_m_Comp1_d_TUP21_ubu[v28_2]) == 0):\n del _m_Comp1_d_TUP21_ubu[v28_2]\n\n_m_Comp1_d_M_in = Map()\ndef _maint__m_Comp1_d_M_in_add(_e):\n (v25_1, v25_2) = _e\n if (v25_2 not in _m_Comp1_d_M_in):\n _m_Comp1_d_M_in[v25_2] = set()\n _m_Comp1_d_M_in[v25_2].add(v25_1)\n\ndef _maint__m_Comp1_d_M_in_remove(_e):\n (v26_1, v26_2) = _e\n _m_Comp1_d_M_in[v26_2].remove(v26_1)\n if (len(_m_Comp1_d_M_in[v26_2]) == 0):\n del _m_Comp1_d_M_in[v26_2]\n\nComp1_d_TUP22 = RCSet()\ndef _maint_Comp1_d_TUP22_Comp1_T_tup2_add(_e):\n # Iterate {(v21__tup2, v21_x, v21_y) : v21__tup2 in deltamatch(Comp1_T_tup2, 'b', _e, 1), (v21__tup2, v21_x, v21_y) in _TUP2}\n v21__tup2 = _e\n if (isinstance(v21__tup2, tuple) and (len(v21__tup2) == 2)):\n for (v21_x, v21_y) in setmatch({(v21__tup2, v21__tup2[0], v21__tup2[1])}, 'buu', v21__tup2):\n Comp1_d_TUP22.add((v21__tup2, v21_x, v21_y))\n\ndef _maint_Comp1_d_TUP22_Comp1_T_tup2_remove(_e):\n # Iterate {(v22__tup2, v22_x, v22_y) : v22__tup2 in deltamatch(Comp1_T_tup2, 'b', _e, 1), (v22__tup2, v22_x, v22_y) in _TUP2}\n v22__tup2 = _e\n if (isinstance(v22__tup2, tuple) and (len(v22__tup2) == 2)):\n for (v22_x, v22_y) in setmatch({(v22__tup2, v22__tup2[0], v22__tup2[1])}, 'buu', v22__tup2):\n Comp1_d_TUP22.remove((v22__tup2, v22_x, v22_y))\n\nComp1_T_tup2 = RCSet()\ndef _maint_Comp1_T_tup2_Comp1_d_TUP21_add(_e):\n # Iterate {(v19__tup1, v19__tup2, v19_z) : (v19__tup1, v19__tup2, v19_z) in deltamatch(Comp1_d_TUP21, 'bbb', _e, 1)}\n (v19__tup1, v19__tup2, v19_z) = _e\n if (v19__tup2 not in Comp1_T_tup2):\n Comp1_T_tup2.add(v19__tup2)\n # Begin maint Comp1_d_TUP22 after \"Comp1_T_tup2.add(v19__tup2)\"\n _maint_Comp1_d_TUP22_Comp1_T_tup2_add(v19__tup2)\n # End maint Comp1_d_TUP22 after \"Comp1_T_tup2.add(v19__tup2)\"\n else:\n Comp1_T_tup2.incref(v19__tup2)\n\ndef _maint_Comp1_T_tup2_Comp1_d_TUP21_remove(_e):\n # Iterate {(v20__tup1, v20__tup2, v20_z) : (v20__tup1, v20__tup2, v20_z) in deltamatch(Comp1_d_TUP21, 'bbb', _e, 1)}\n (v20__tup1, v20__tup2, v20_z) = _e\n if (Comp1_T_tup2.getref(v20__tup2) == 1):\n # Begin maint Comp1_d_TUP22 before \"Comp1_T_tup2.remove(v20__tup2)\"\n _maint_Comp1_d_TUP22_Comp1_T_tup2_remove(v20__tup2)\n # End maint Comp1_d_TUP22 before \"Comp1_T_tup2.remove(v20__tup2)\"\n Comp1_T_tup2.remove(v20__tup2)\n else:\n Comp1_T_tup2.decref(v20__tup2)\n\nComp1_d_TUP21 = RCSet()\ndef _maint_Comp1_d_TUP21_Comp1_T_tup1_add(_e):\n # Iterate {(v15__tup1, v15__tup2, v15_z) : v15__tup1 in deltamatch(Comp1_T_tup1, 'b', _e, 1), (v15__tup1, v15__tup2, v15_z) in _TUP2}\n v15__tup1 = _e\n if (isinstance(v15__tup1, tuple) and (len(v15__tup1) == 2)):\n for (v15__tup2, v15_z) in setmatch({(v15__tup1, v15__tup1[0], v15__tup1[1])}, 'buu', v15__tup1):\n Comp1_d_TUP21.add((v15__tup1, v15__tup2, v15_z))\n # Begin maint _m_Comp1_d_TUP21_ubu after \"Comp1_d_TUP21.add((v15__tup1, v15__tup2, v15_z))\"\n _maint__m_Comp1_d_TUP21_ubu_add((v15__tup1, v15__tup2, v15_z))\n # End maint _m_Comp1_d_TUP21_ubu after \"Comp1_d_TUP21.add((v15__tup1, v15__tup2, v15_z))\"\n # Begin maint Comp1_T_tup2 after \"Comp1_d_TUP21.add((v15__tup1, v15__tup2, v15_z))\"\n _maint_Comp1_T_tup2_Comp1_d_TUP21_add((v15__tup1, v15__tup2, v15_z))\n # End maint Comp1_T_tup2 after \"Comp1_d_TUP21.add((v15__tup1, v15__tup2, v15_z))\"\n\ndef _maint_Comp1_d_TUP21_Comp1_T_tup1_remove(_e):\n # Iterate {(v16__tup1, v16__tup2, v16_z) : v16__tup1 in deltamatch(Comp1_T_tup1, 'b', _e, 1), (v16__tup1, v16__tup2, v16_z) in _TUP2}\n v16__tup1 = _e\n if 
(isinstance(v16__tup1, tuple) and (len(v16__tup1) == 2)):\n for (v16__tup2, v16_z) in setmatch({(v16__tup1, v16__tup1[0], v16__tup1[1])}, 'buu', v16__tup1):\n # Begin maint Comp1_T_tup2 before \"Comp1_d_TUP21.remove((v16__tup1, v16__tup2, v16_z))\"\n _maint_Comp1_T_tup2_Comp1_d_TUP21_remove((v16__tup1, v16__tup2, v16_z))\n # End maint Comp1_T_tup2 before \"Comp1_d_TUP21.remove((v16__tup1, v16__tup2, v16_z))\"\n # Begin maint _m_Comp1_d_TUP21_ubu before \"Comp1_d_TUP21.remove((v16__tup1, v16__tup2, v16_z))\"\n _maint__m_Comp1_d_TUP21_ubu_remove((v16__tup1, v16__tup2, v16_z))\n # End maint _m_Comp1_d_TUP21_ubu before \"Comp1_d_TUP21.remove((v16__tup1, v16__tup2, v16_z))\"\n Comp1_d_TUP21.remove((v16__tup1, v16__tup2, v16_z))\n\nComp1_T_tup1 = RCSet()\ndef _maint_Comp1_T_tup1_Comp1_d_M_add(_e):\n # Iterate {(v13_s, v13__tup1) : (v13_s, v13__tup1) in deltamatch(Comp1_d_M, 'bb', _e, 1)}\n (v13_s, v13__tup1) = _e\n if (v13__tup1 not in Comp1_T_tup1):\n Comp1_T_tup1.add(v13__tup1)\n # Begin maint Comp1_d_TUP21 after \"Comp1_T_tup1.add(v13__tup1)\"\n _maint_Comp1_d_TUP21_Comp1_T_tup1_add(v13__tup1)\n # End maint Comp1_d_TUP21 after \"Comp1_T_tup1.add(v13__tup1)\"\n else:\n Comp1_T_tup1.incref(v13__tup1)\n\ndef _maint_Comp1_T_tup1_Comp1_d_M_remove(_e):\n # Iterate {(v14_s, v14__tup1) : (v14_s, v14__tup1) in deltamatch(Comp1_d_M, 'bb', _e, 1)}\n (v14_s, v14__tup1) = _e\n if (Comp1_T_tup1.getref(v14__tup1) == 1):\n # Begin maint Comp1_d_TUP21 before \"Comp1_T_tup1.remove(v14__tup1)\"\n _maint_Comp1_d_TUP21_Comp1_T_tup1_remove(v14__tup1)\n # End maint Comp1_d_TUP21 before \"Comp1_T_tup1.remove(v14__tup1)\"\n Comp1_T_tup1.remove(v14__tup1)\n else:\n Comp1_T_tup1.decref(v14__tup1)\n\nComp1_d_M = RCSet()\ndef _maint_Comp1_d_M_Comp1_Ts_add(_e):\n # Iterate {(v9_s, v9__tup1) : v9_s in deltamatch(Comp1_Ts, 'b', _e, 1), (v9_s, v9__tup1) in _M}\n v9_s = _e\n if isinstance(v9_s, Set):\n for v9__tup1 in v9_s:\n Comp1_d_M.add((v9_s, v9__tup1))\n # Begin maint _m_Comp1_d_M_in after \"Comp1_d_M.add((v9_s, v9__tup1))\"\n _maint__m_Comp1_d_M_in_add((v9_s, v9__tup1))\n # End maint _m_Comp1_d_M_in after \"Comp1_d_M.add((v9_s, v9__tup1))\"\n # Begin maint Comp1_T_tup1 after \"Comp1_d_M.add((v9_s, v9__tup1))\"\n _maint_Comp1_T_tup1_Comp1_d_M_add((v9_s, v9__tup1))\n # End maint Comp1_T_tup1 after \"Comp1_d_M.add((v9_s, v9__tup1))\"\n\ndef _maint_Comp1_d_M_Comp1_Ts_remove(_e):\n # Iterate {(v10_s, v10__tup1) : v10_s in deltamatch(Comp1_Ts, 'b', _e, 1), (v10_s, v10__tup1) in _M}\n v10_s = _e\n if isinstance(v10_s, Set):\n for v10__tup1 in v10_s:\n # Begin maint Comp1_T_tup1 before \"Comp1_d_M.remove((v10_s, v10__tup1))\"\n _maint_Comp1_T_tup1_Comp1_d_M_remove((v10_s, v10__tup1))\n # End maint Comp1_T_tup1 before \"Comp1_d_M.remove((v10_s, v10__tup1))\"\n # Begin maint _m_Comp1_d_M_in before \"Comp1_d_M.remove((v10_s, v10__tup1))\"\n _maint__m_Comp1_d_M_in_remove((v10_s, v10__tup1))\n # End maint _m_Comp1_d_M_in before \"Comp1_d_M.remove((v10_s, v10__tup1))\"\n Comp1_d_M.remove((v10_s, v10__tup1))\n\ndef _maint_Comp1_d_M__M_add(_e):\n # Iterate {(v11_s, v11__tup1) : v11_s in Comp1_Ts, (v11_s, v11__tup1) in deltamatch(_M, 'bb', _e, 1)}\n (v11_s, v11__tup1) = _e\n if (v11_s in Comp1_Ts):\n Comp1_d_M.add((v11_s, v11__tup1))\n # Begin maint _m_Comp1_d_M_in after \"Comp1_d_M.add((v11_s, v11__tup1))\"\n _maint__m_Comp1_d_M_in_add((v11_s, v11__tup1))\n # End maint _m_Comp1_d_M_in after \"Comp1_d_M.add((v11_s, v11__tup1))\"\n # Begin maint Comp1_T_tup1 after \"Comp1_d_M.add((v11_s, v11__tup1))\"\n 
_maint_Comp1_T_tup1_Comp1_d_M_add((v11_s, v11__tup1))\n # End maint Comp1_T_tup1 after \"Comp1_d_M.add((v11_s, v11__tup1))\"\n\nComp1_Ts = RCSet()\ndef _maint_Comp1_Ts__U_Comp1_add(_e):\n # Iterate {v7_s : v7_s in deltamatch(_U_Comp1, 'b', _e, 1)}\n v7_s = _e\n Comp1_Ts.add(v7_s)\n # Begin maint Comp1_d_M after \"Comp1_Ts.add(v7_s)\"\n _maint_Comp1_d_M_Comp1_Ts_add(v7_s)\n # End maint Comp1_d_M after \"Comp1_Ts.add(v7_s)\"\n\ndef _maint_Comp1_Ts__U_Comp1_remove(_e):\n # Iterate {v8_s : v8_s in deltamatch(_U_Comp1, 'b', _e, 1)}\n v8_s = _e\n # Begin maint Comp1_d_M before \"Comp1_Ts.remove(v8_s)\"\n _maint_Comp1_d_M_Comp1_Ts_remove(v8_s)\n # End maint Comp1_d_M before \"Comp1_Ts.remove(v8_s)\"\n Comp1_Ts.remove(v8_s)\n\nComp1 = RCSet()\ndef _maint_Comp1__U_Comp1_add(_e):\n # Iterate {(v1_s, v1__tup1, v1__tup2, v1_z, v1_x, v1_y) : v1_s in deltamatch(_U_Comp1, 'b', _e, 1), (v1_s, v1__tup1) in _M, (v1__tup1, v1__tup2, v1_z) in _TUP2, (v1__tup2, v1_x, v1_y) in _TUP2, ((v1_x + v1_y) == v1_z)}\n v1_s = _e\n if isinstance(v1_s, Set):\n for v1__tup1 in v1_s:\n if (isinstance(v1__tup1, tuple) and (len(v1__tup1) == 2)):\n for (v1__tup2, v1_z) in setmatch({(v1__tup1, v1__tup1[0], v1__tup1[1])}, 'buu', v1__tup1):\n if (isinstance(v1__tup2, tuple) and (len(v1__tup2) == 2)):\n for (v1_x, v1_y) in setmatch({(v1__tup2, v1__tup2[0], v1__tup2[1])}, 'buu', v1__tup2):\n if ((v1_x + v1_y) == v1_z):\n if ((v1_s, v1_x) not in Comp1):\n Comp1.add((v1_s, v1_x))\n # Begin maint _m_Comp1_out after \"Comp1.add((v1_s, v1_x))\"\n _maint__m_Comp1_out_add((v1_s, v1_x))\n # End maint _m_Comp1_out after \"Comp1.add((v1_s, v1_x))\"\n else:\n Comp1.incref((v1_s, v1_x))\n\ndef _maint_Comp1__U_Comp1_remove(_e):\n # Iterate {(v2_s, v2__tup1, v2__tup2, v2_z, v2_x, v2_y) : v2_s in deltamatch(_U_Comp1, 'b', _e, 1), (v2_s, v2__tup1) in _M, (v2__tup1, v2__tup2, v2_z) in _TUP2, (v2__tup2, v2_x, v2_y) in _TUP2, ((v2_x + v2_y) == v2_z)}\n v2_s = _e\n if isinstance(v2_s, Set):\n for v2__tup1 in v2_s:\n if (isinstance(v2__tup1, tuple) and (len(v2__tup1) == 2)):\n for (v2__tup2, v2_z) in setmatch({(v2__tup1, v2__tup1[0], v2__tup1[1])}, 'buu', v2__tup1):\n if (isinstance(v2__tup2, tuple) and (len(v2__tup2) == 2)):\n for (v2_x, v2_y) in setmatch({(v2__tup2, v2__tup2[0], v2__tup2[1])}, 'buu', v2__tup2):\n if ((v2_x + v2_y) == v2_z):\n if (Comp1.getref((v2_s, v2_x)) == 1):\n # Begin maint _m_Comp1_out before \"Comp1.remove((v2_s, v2_x))\"\n _maint__m_Comp1_out_remove((v2_s, v2_x))\n # End maint _m_Comp1_out before \"Comp1.remove((v2_s, v2_x))\"\n Comp1.remove((v2_s, v2_x))\n else:\n Comp1.decref((v2_s, v2_x))\n\ndef _maint_Comp1__M_add(_e):\n # Iterate {(v3_s, v3__tup1, v3__tup2, v3_z, v3_x, v3_y) : v3_s in _U_Comp1, (v3_s, v3__tup1) in deltamatch(Comp1_d_M, 'bb', _e, 1), (v3_s, v3__tup1) in Comp1_d_M, (v3__tup1, v3__tup2, v3_z) in _TUP2, (v3__tup2, v3_x, v3_y) in _TUP2, ((v3_x + v3_y) == v3_z)}\n (v3_s, v3__tup1) = _e\n if (v3_s in _U_Comp1):\n if ((v3_s, v3__tup1) in Comp1_d_M):\n if (isinstance(v3__tup1, tuple) and (len(v3__tup1) == 2)):\n for (v3__tup2, v3_z) in setmatch({(v3__tup1, v3__tup1[0], v3__tup1[1])}, 'buu', v3__tup1):\n if (isinstance(v3__tup2, tuple) and (len(v3__tup2) == 2)):\n for (v3_x, v3_y) in setmatch({(v3__tup2, v3__tup2[0], v3__tup2[1])}, 'buu', v3__tup2):\n if ((v3_x + v3_y) == v3_z):\n if ((v3_s, v3_x) not in Comp1):\n Comp1.add((v3_s, v3_x))\n # Begin maint _m_Comp1_out after \"Comp1.add((v3_s, v3_x))\"\n _maint__m_Comp1_out_add((v3_s, v3_x))\n # End maint _m_Comp1_out after \"Comp1.add((v3_s, v3_x))\"\n else:\n 
Comp1.incref((v3_s, v3_x))\n\n_U_Comp1 = RCSet()\n_UEXT_Comp1 = Set()\ndef demand_Comp1(s):\n '{(s, x) : s in _U_Comp1, (s, _tup1) in _M, (_tup1, _tup2, z) in _TUP2, (_tup2, x, y) in _TUP2, ((x + y) == z)}'\n if (s not in _U_Comp1):\n _U_Comp1.add(s)\n # Begin maint Comp1_Ts after \"_U_Comp1.add(s)\"\n _maint_Comp1_Ts__U_Comp1_add(s)\n # End maint Comp1_Ts after \"_U_Comp1.add(s)\"\n # Begin maint Comp1 after \"_U_Comp1.add(s)\"\n _maint_Comp1__U_Comp1_add(s)\n # End maint Comp1 after \"_U_Comp1.add(s)\"\n else:\n _U_Comp1.incref(s)\n\ndef undemand_Comp1(s):\n '{(s, x) : s in _U_Comp1, (s, _tup1) in _M, (_tup1, _tup2, z) in _TUP2, (_tup2, x, y) in _TUP2, ((x + y) == z)}'\n if (_U_Comp1.getref(s) == 1):\n # Begin maint Comp1 before \"_U_Comp1.remove(s)\"\n _maint_Comp1__U_Comp1_remove(s)\n # End maint Comp1 before \"_U_Comp1.remove(s)\"\n # Begin maint Comp1_Ts before \"_U_Comp1.remove(s)\"\n _maint_Comp1_Ts__U_Comp1_remove(s)\n # End maint Comp1_Ts before \"_U_Comp1.remove(s)\"\n _U_Comp1.remove(s)\n else:\n _U_Comp1.decref(s)\n\ndef query_Comp1(s):\n '{(s, x) : s in _U_Comp1, (s, _tup1) in _M, (_tup1, _tup2, z) in _TUP2, (_tup2, x, y) in _TUP2, ((x + y) == z)}'\n if (s not in _UEXT_Comp1):\n _UEXT_Comp1.add(s)\n demand_Comp1(s)\n return True\n\ns1 = Set()\nfor i in [1, 2, 3]:\n s1.add(((i, (2 * i)), (3 * i)))\n # Begin maint Comp1_d_M after \"_M.add((s1, ((i, (2 * i)), (3 * i))))\"\n _maint_Comp1_d_M__M_add((s1, ((i, (2 * i)), (3 * i))))\n # End maint Comp1_d_M after \"_M.add((s1, ((i, (2 * i)), (3 * i))))\"\n # Begin maint Comp1 after \"_M.add((s1, ((i, (2 * i)), (3 * i))))\"\n _maint_Comp1__M_add((s1, ((i, (2 * i)), (3 * i))))\n # End maint Comp1 after \"_M.add((s1, ((i, (2 * i)), (3 * i))))\"\n s1.add((((2 * i), (3 * i)), (4 * i)))\n # Begin maint Comp1_d_M after \"_M.add((s1, (((2 * i), (3 * i)), (4 * i))))\"\n _maint_Comp1_d_M__M_add((s1, (((2 * i), (3 * i)), (4 * i))))\n # End maint Comp1_d_M after \"_M.add((s1, (((2 * i), (3 * i)), (4 * i))))\"\n # Begin maint Comp1 after \"_M.add((s1, (((2 * i), (3 * i)), (4 * i))))\"\n _maint_Comp1__M_add((s1, (((2 * i), (3 * i)), (4 * i))))\n # End maint Comp1 after \"_M.add((s1, (((2 * i), (3 * i)), (4 * i))))\"\ns = s1\nprint(sorted((query_Comp1(s) and (_m_Comp1_out[s] if (s in _m_Comp1_out) else set()))))" }, { "alpha_fraction": 0.48303934931755066, "alphanum_fraction": 0.48439618945121765, "avg_line_length": 23.566667556762695, "blob_id": "179cbf577efda6e0f7c9550b9c89f313cd4a10a3", "content_id": "001f6c1b7614a73d84891c0cf1dc48e19e5f265c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1474, "license_type": "no_license", "max_line_length": 68, "num_lines": 60, "path": "/incoq/tests/invinc/incast/test_error.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Unit tests for error.py.\"\"\"\n\n\nimport unittest\nimport sys\n\nfrom incoq.compiler.incast.structconv import *\nfrom incoq.compiler.incast.error import *\n\n\n# Use a derived class that doesn't rely on external unparsing logic.\nclass ProgramError(ProgramError):\n @classmethod\n def ts(cls, tree):\n return unparse_structast(tree)\n\n\nclass StructconvCase(unittest.TestCase):\n \n def test_programerror(self):\n tree = parse_structast('x; pass')\n exc = ProgramError('foo', node=tree.body[0],\n ast_context=[tree, tree.body[0]])\n s = exc.format_ast_context(exc.ast_context)\n exp_s = trim('''\n AST context (most local node last):\n ==== Module ====\n x\n pass\n ==== Expr ====\n x\n \n ''')\n 
self.assertEqual(s, exp_s)\n \n try:\n raise exc\n except ProgramError:\n exc_info = sys.exc_info()\n s = ''.join(exc.format_self(*exc_info))\n pat_s = trim('''\n Traceback \\(most recent call last\\):\n File .*\n raise exc\n .*ProgramError: foo\n \n AST context \\(most local node last\\):\n ==== Module ====\n x\n pass\n ==== Expr ====\n x\n \n ''')\n self.assertRegex(s, pat_s)\n del exc_info\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.4771437644958496, "alphanum_fraction": 0.5168663263320923, "avg_line_length": 34.25, "blob_id": "fad1f8fe369e802d79954dc46c47f349df0062a1", "content_id": "b702725fd607bef35c8c8a0b7bddbb743754bb23", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6344, "license_type": "no_license", "max_line_length": 98, "num_lines": 180, "path": "/incoq/tests/programs/comp/macroupdate_out.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from incoq.runtime import *\n# Comp1 := {x : (x, y) in E, f(y)}\n# Comp4 := {(x, z) : (x, y) in E, (y, z) in E}\n_m_E_in = Map()\ndef _maint__m_E_in_add(_e):\n (v7_1, v7_2) = _e\n if (v7_2 not in _m_E_in):\n _m_E_in[v7_2] = set()\n _m_E_in[v7_2].add(v7_1)\n\ndef _maint__m_E_in_remove(_e):\n (v8_1, v8_2) = _e\n _m_E_in[v8_2].remove(v8_1)\n if (len(_m_E_in[v8_2]) == 0):\n del _m_E_in[v8_2]\n\n_m_E_out = Map()\ndef _maint__m_E_out_add(_e):\n (v5_1, v5_2) = _e\n if (v5_1 not in _m_E_out):\n _m_E_out[v5_1] = set()\n _m_E_out[v5_1].add(v5_2)\n\ndef _maint__m_E_out_remove(_e):\n (v6_1, v6_2) = _e\n _m_E_out[v6_1].remove(v6_2)\n if (len(_m_E_out[v6_1]) == 0):\n del _m_E_out[v6_1]\n\nComp4 = RCSet()\ndef _maint_Comp4_E_add(_e):\n v3_DAS = set()\n # Iterate {(v3_x, v3_y, v3_z) : (v3_x, v3_y) in deltamatch(E, 'bb', _e, 1), (v3_y, v3_z) in E}\n (v3_x, v3_y) = _e\n for v3_z in (_m_E_out[v3_y] if (v3_y in _m_E_out) else set()):\n if ((v3_x, v3_y, v3_z) not in v3_DAS):\n v3_DAS.add((v3_x, v3_y, v3_z))\n # Iterate {(v3_x, v3_y, v3_z) : (v3_x, v3_y) in E, (v3_y, v3_z) in deltamatch(E, 'bb', _e, 1)}\n (v3_y, v3_z) = _e\n for v3_x in (_m_E_in[v3_y] if (v3_y in _m_E_in) else set()):\n if ((v3_x, v3_y, v3_z) not in v3_DAS):\n v3_DAS.add((v3_x, v3_y, v3_z))\n for (v3_x, v3_y, v3_z) in v3_DAS:\n if ((v3_x, v3_z) not in Comp4):\n Comp4.add((v3_x, v3_z))\n else:\n Comp4.incref((v3_x, v3_z))\n del v3_DAS\n\ndef _maint_Comp4_E_remove(_e):\n v4_DAS = set()\n # Iterate {(v4_x, v4_y, v4_z) : (v4_x, v4_y) in deltamatch(E, 'bb', _e, 1), (v4_y, v4_z) in E}\n (v4_x, v4_y) = _e\n for v4_z in (_m_E_out[v4_y] if (v4_y in _m_E_out) else set()):\n if ((v4_x, v4_y, v4_z) not in v4_DAS):\n v4_DAS.add((v4_x, v4_y, v4_z))\n # Iterate {(v4_x, v4_y, v4_z) : (v4_x, v4_y) in E, (v4_y, v4_z) in deltamatch(E, 'bb', _e, 1)}\n (v4_y, v4_z) = _e\n for v4_x in (_m_E_in[v4_y] if (v4_y in _m_E_in) else set()):\n if ((v4_x, v4_y, v4_z) not in v4_DAS):\n v4_DAS.add((v4_x, v4_y, v4_z))\n for (v4_x, v4_y, v4_z) in v4_DAS:\n if (Comp4.getref((v4_x, v4_z)) == 1):\n Comp4.remove((v4_x, v4_z))\n else:\n Comp4.decref((v4_x, v4_z))\n del v4_DAS\n\nComp1 = RCSet()\ndef _maint_Comp1_E_add(_e):\n # Iterate {(v1_x, v1_y) : (v1_x, v1_y) in deltamatch(E, 'bb', _e, 1), f(v1_y)}\n (v1_x, v1_y) = _e\n if f(v1_y):\n if (v1_x not in Comp1):\n Comp1.add(v1_x)\n else:\n Comp1.incref(v1_x)\n\ndef _maint_Comp1_E_remove(_e):\n # Iterate {(v2_x, v2_y) : (v2_x, v2_y) in deltamatch(E, 'bb', _e, 1), f(v2_y)}\n (v2_x, v2_y) = _e\n if f(v2_y):\n if (Comp1.getref(v2_x) == 1):\n Comp1.remove(v2_x)\n else:\n 
Comp1.decref(v2_x)\n\ndef f(y):\n return True\n\nE = Set()\nR = Set()\nT = Set()\nV = Set()\nfor (v1, v2) in [(1, 2), (1, 3), (2, 3), (3, 4)]:\n R.add((v1, v2))\nT.add((3, 4))\nV.add((5, 5))\ndef query():\n print(sorted(Comp1))\n print(sorted(Comp4))\n\nfor _upelem in R:\n if (_upelem not in E):\n E.add(_upelem)\n # Begin maint _m_E_in after \"E.add(_upelem)\"\n _maint__m_E_in_add(_upelem)\n # End maint _m_E_in after \"E.add(_upelem)\"\n # Begin maint _m_E_out after \"E.add(_upelem)\"\n _maint__m_E_out_add(_upelem)\n # End maint _m_E_out after \"E.add(_upelem)\"\n # Begin maint Comp4 after \"E.add(_upelem)\"\n _maint_Comp4_E_add(_upelem)\n # End maint Comp4 after \"E.add(_upelem)\"\n # Begin maint Comp1 after \"E.add(_upelem)\"\n _maint_Comp1_E_add(_upelem)\n # End maint Comp1 after \"E.add(_upelem)\"\nquery()\nfor _upelem in list(T):\n if (_upelem in E):\n # Begin maint Comp1 before \"E.remove(_upelem)\"\n _maint_Comp1_E_remove(_upelem)\n # End maint Comp1 before \"E.remove(_upelem)\"\n # Begin maint Comp4 before \"E.remove(_upelem)\"\n _maint_Comp4_E_remove(_upelem)\n # End maint Comp4 before \"E.remove(_upelem)\"\n # Begin maint _m_E_out before \"E.remove(_upelem)\"\n _maint__m_E_out_remove(_upelem)\n # End maint _m_E_out before \"E.remove(_upelem)\"\n # Begin maint _m_E_in before \"E.remove(_upelem)\"\n _maint__m_E_in_remove(_upelem)\n # End maint _m_E_in before \"E.remove(_upelem)\"\n E.remove(_upelem)\nquery()\nfor _upelem in list(V):\n if (_upelem in E):\n # Begin maint Comp1 before \"E.remove(_upelem)\"\n _maint_Comp1_E_remove(_upelem)\n # End maint Comp1 before \"E.remove(_upelem)\"\n # Begin maint Comp4 before \"E.remove(_upelem)\"\n _maint_Comp4_E_remove(_upelem)\n # End maint Comp4 before \"E.remove(_upelem)\"\n # Begin maint _m_E_out before \"E.remove(_upelem)\"\n _maint__m_E_out_remove(_upelem)\n # End maint _m_E_out before \"E.remove(_upelem)\"\n # Begin maint _m_E_in before \"E.remove(_upelem)\"\n _maint__m_E_in_remove(_upelem)\n # End maint _m_E_in before \"E.remove(_upelem)\"\n E.remove(_upelem)\n else:\n E.add(_upelem)\n # Begin maint _m_E_in after \"E.add(_upelem)\"\n _maint__m_E_in_add(_upelem)\n # End maint _m_E_in after \"E.add(_upelem)\"\n # Begin maint _m_E_out after \"E.add(_upelem)\"\n _maint__m_E_out_add(_upelem)\n # End maint _m_E_out after \"E.add(_upelem)\"\n # Begin maint Comp4 after \"E.add(_upelem)\"\n _maint_Comp4_E_add(_upelem)\n # End maint Comp4 after \"E.add(_upelem)\"\n # Begin maint Comp1 after \"E.add(_upelem)\"\n _maint_Comp1_E_add(_upelem)\n # End maint Comp1 after \"E.add(_upelem)\"\nquery()\nfor _upelem in list(E):\n if (_upelem not in V):\n # Begin maint Comp1 before \"E.remove(_upelem)\"\n _maint_Comp1_E_remove(_upelem)\n # End maint Comp1 before \"E.remove(_upelem)\"\n # Begin maint Comp4 before \"E.remove(_upelem)\"\n _maint_Comp4_E_remove(_upelem)\n # End maint Comp4 before \"E.remove(_upelem)\"\n # Begin maint _m_E_out before \"E.remove(_upelem)\"\n _maint__m_E_out_remove(_upelem)\n # End maint _m_E_out before \"E.remove(_upelem)\"\n # Begin maint _m_E_in before \"E.remove(_upelem)\"\n _maint__m_E_in_remove(_upelem)\n # End maint _m_E_in before \"E.remove(_upelem)\"\n E.remove(_upelem)\nquery()" }, { "alpha_fraction": 0.4290909171104431, "alphanum_fraction": 0.5163636207580566, "avg_line_length": 18.64285659790039, "blob_id": "13e1dec3577361b3031a0a966abd4805910d8708", "content_id": "e02667f5a2746b04f8378dc6be6f4d515a8a1d2c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 275, 
"license_type": "no_license", "max_line_length": 53, "num_lines": 14, "path": "/incoq/tests/programs/auxmap/equality_in.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "# Auxmaps with equality constraints.\n\nfrom incoq.runtime import *\n\nP = Set()\n\nfor v in [(1, 2, 2), (2, 2, 2), (3, 3, 3), (4, 1, 2),\n (5, 2, 3), (9, 9, 9)]:\n P.add(v)\n\nP.remove((9, 9, 9))\n\nprint(sorted(setmatch(P, 'uu2', ())))\nprint(sorted(setmatch(P, 'ub2', 2)))\n" }, { "alpha_fraction": 0.6077219843864441, "alphanum_fraction": 0.6193050146102905, "avg_line_length": 19.234375, "blob_id": "48dfe2cc0845192a3cb053680693d9413827d8ce", "content_id": "9903725e5ac405203e5343c3a3b4b049e18ee73b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1295, "license_type": "no_license", "max_line_length": 91, "num_lines": 64, "path": "/experiments/jql/jql_2_in.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "# JQL query, two levels.\n\nfrom incoq.runtime import *\n\nOPTIONS(\n obj_domain = True,\n)\n\nQUERYOPTIONS(\n '{(a, s) for a in ATTENDS for s in STUDENTS if a.course == COMP101 if a.student == s}',\n uset_mode = 'all',\n)\n\nclass Student(Obj):\n pass\n\nclass Course(Obj):\n pass\n\nclass Attends(Obj):\n pass\n\nATTENDS = Set()\nSTUDENTS = Set()\nCOURSES = Set()\n\n# Functions for constructing objects and adding them to\n# the global sets.\n\ndef make_student(name):\n s = Student()\n s.name = name\n STUDENTS.add(s)\n return s\n\ndef make_course(name):\n c = Course()\n c.name = name\n COURSES.add(c)\n return c\n\ndef make_attends(s, c):\n a = Attends()\n a.student = s\n a.course = c\n ATTENDS.add(a)\n return a\n\n# Replacement takes in an Attends object to remove,\n# and the parameters to a new Attends object to construct\n# and add to the set.\n\ndef replace_attends(old_a, new_s, new_c):\n new_a = make_attends(new_s, new_c)\n ATTENDS.remove(old_a)\n return new_a\n\ndef do_query(COMP101):\n return {(a, s) for a in ATTENDS for s in STUDENTS\n if a.course == COMP101 if a.student == s}\n\ndef do_query_nodemand(COMP101):\n return NODEMAND({(a, s) for a in ATTENDS for s in STUDENTS\n if a.course == COMP101 if a.student == s})\n" }, { "alpha_fraction": 0.4127209782600403, "alphanum_fraction": 0.4925827980041504, "avg_line_length": 39.67768478393555, "blob_id": "54f6a07d47efa811cc7996336465b24cf8118625", "content_id": "1d4a965afa5b5b789ddd5225d7a366be168781fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4921, "license_type": "no_license", "max_line_length": 115, "num_lines": 121, "path": "/incoq/tests/programs/comp/nested/param_out.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from incoq.runtime import *\n# Comp1 := {(a, a, c) : (a, b) in E, (b, c) in E}\n# Comp6 := {(a, (x, z)) : (x, y) in E, (a, y, z) in Comp1}\n_m_Comp6_out = Map()\ndef _maint__m_Comp6_out_add(_e):\n (v13_1, v13_2) = _e\n if (v13_1 not in _m_Comp6_out):\n _m_Comp6_out[v13_1] = set()\n _m_Comp6_out[v13_1].add(v13_2)\n\ndef _maint__m_Comp6_out_remove(_e):\n (v14_1, v14_2) = _e\n _m_Comp6_out[v14_1].remove(v14_2)\n if (len(_m_Comp6_out[v14_1]) == 0):\n del _m_Comp6_out[v14_1]\n\n_m_E_out = Map()\ndef _maint__m_E_out_add(_e):\n (v11_1, v11_2) = _e\n if (v11_1 not in _m_E_out):\n _m_E_out[v11_1] = set()\n _m_E_out[v11_1].add(v11_2)\n\n_m_E_in = Map()\ndef _maint__m_E_in_add(_e):\n (v9_1, v9_2) = _e\n if (v9_2 not in _m_E_in):\n _m_E_in[v9_2] = set()\n 
_m_E_in[v9_2].add(v9_1)\n\n_m_Comp1_ubu = Map()\ndef _maint__m_Comp1_ubu_add(_e):\n (v7_1, v7_2, v7_3) = _e\n if (v7_2 not in _m_Comp1_ubu):\n _m_Comp1_ubu[v7_2] = set()\n _m_Comp1_ubu[v7_2].add((v7_1, v7_3))\n\ndef _maint__m_Comp1_ubu_remove(_e):\n (v8_1, v8_2, v8_3) = _e\n _m_Comp1_ubu[v8_2].remove((v8_1, v8_3))\n if (len(_m_Comp1_ubu[v8_2]) == 0):\n del _m_Comp1_ubu[v8_2]\n\nComp6 = RCSet()\ndef _maint_Comp6_E_add(_e):\n # Iterate {(v3_x, v3_y, v3_a, v3_z) : (v3_x, v3_y) in deltamatch(E, 'bb', _e, 1), (v3_a, v3_y, v3_z) in Comp1}\n (v3_x, v3_y) = _e\n for (v3_a, v3_z) in (_m_Comp1_ubu[v3_y] if (v3_y in _m_Comp1_ubu) else set()):\n if ((v3_a, (v3_x, v3_z)) not in Comp6):\n Comp6.add((v3_a, (v3_x, v3_z)))\n # Begin maint _m_Comp6_out after \"Comp6.add((v3_a, (v3_x, v3_z)))\"\n _maint__m_Comp6_out_add((v3_a, (v3_x, v3_z)))\n # End maint _m_Comp6_out after \"Comp6.add((v3_a, (v3_x, v3_z)))\"\n else:\n Comp6.incref((v3_a, (v3_x, v3_z)))\n\ndef _maint_Comp6_Comp1_add(_e):\n # Iterate {(v5_x, v5_y, v5_a, v5_z) : (v5_x, v5_y) in E, (v5_a, v5_y, v5_z) in deltamatch(Comp1, 'bbb', _e, 1)}\n (v5_a, v5_y, v5_z) = _e\n for v5_x in (_m_E_in[v5_y] if (v5_y in _m_E_in) else set()):\n if ((v5_a, (v5_x, v5_z)) not in Comp6):\n Comp6.add((v5_a, (v5_x, v5_z)))\n # Begin maint _m_Comp6_out after \"Comp6.add((v5_a, (v5_x, v5_z)))\"\n _maint__m_Comp6_out_add((v5_a, (v5_x, v5_z)))\n # End maint _m_Comp6_out after \"Comp6.add((v5_a, (v5_x, v5_z)))\"\n else:\n Comp6.incref((v5_a, (v5_x, v5_z)))\n\ndef _maint_Comp6_Comp1_remove(_e):\n # Iterate {(v6_x, v6_y, v6_a, v6_z) : (v6_x, v6_y) in E, (v6_a, v6_y, v6_z) in deltamatch(Comp1, 'bbb', _e, 1)}\n (v6_a, v6_y, v6_z) = _e\n for v6_x in (_m_E_in[v6_y] if (v6_y in _m_E_in) else set()):\n if (Comp6.getref((v6_a, (v6_x, v6_z))) == 1):\n # Begin maint _m_Comp6_out before \"Comp6.remove((v6_a, (v6_x, v6_z)))\"\n _maint__m_Comp6_out_remove((v6_a, (v6_x, v6_z)))\n # End maint _m_Comp6_out before \"Comp6.remove((v6_a, (v6_x, v6_z)))\"\n Comp6.remove((v6_a, (v6_x, v6_z)))\n else:\n Comp6.decref((v6_a, (v6_x, v6_z)))\n\nComp1 = RCSet()\ndef _maint_Comp1_E_add(_e):\n v1_DAS = set()\n # Iterate {(v1_a, v1_b, v1_c) : (v1_a, v1_b) in deltamatch(E, 'bb', _e, 1), (v1_b, v1_c) in E}\n (v1_a, v1_b) = _e\n for v1_c in (_m_E_out[v1_b] if (v1_b in _m_E_out) else set()):\n if ((v1_a, v1_b, v1_c) not in v1_DAS):\n v1_DAS.add((v1_a, v1_b, v1_c))\n # Iterate {(v1_a, v1_b, v1_c) : (v1_a, v1_b) in E, (v1_b, v1_c) in deltamatch(E, 'bb', _e, 1)}\n (v1_b, v1_c) = _e\n for v1_a in (_m_E_in[v1_b] if (v1_b in _m_E_in) else set()):\n if ((v1_a, v1_b, v1_c) not in v1_DAS):\n v1_DAS.add((v1_a, v1_b, v1_c))\n for (v1_a, v1_b, v1_c) in v1_DAS:\n if ((v1_a, v1_a, v1_c) not in Comp1):\n Comp1.add((v1_a, v1_a, v1_c))\n # Begin maint _m_Comp1_ubu after \"Comp1.add((v1_a, v1_a, v1_c))\"\n _maint__m_Comp1_ubu_add((v1_a, v1_a, v1_c))\n # End maint _m_Comp1_ubu after \"Comp1.add((v1_a, v1_a, v1_c))\"\n # Begin maint Comp6 after \"Comp1.add((v1_a, v1_a, v1_c))\"\n _maint_Comp6_Comp1_add((v1_a, v1_a, v1_c))\n # End maint Comp6 after \"Comp1.add((v1_a, v1_a, v1_c))\"\n else:\n Comp1.incref((v1_a, v1_a, v1_c))\n del v1_DAS\n\nfor (v1, v2) in [(1, 2), (2, 3), (3, 4), (4, 5)]:\n # Begin maint _m_E_out after \"E.add((v1, v2))\"\n _maint__m_E_out_add((v1, v2))\n # End maint _m_E_out after \"E.add((v1, v2))\"\n # Begin maint _m_E_in after \"E.add((v1, v2))\"\n _maint__m_E_in_add((v1, v2))\n # End maint _m_E_in after \"E.add((v1, v2))\"\n # Begin maint Comp6 after \"E.add((v1, v2))\"\n _maint_Comp6_E_add((v1, v2))\n # End 
maint Comp6 after \"E.add((v1, v2))\"\n # Begin maint Comp1 after \"E.add((v1, v2))\"\n _maint_Comp1_E_add((v1, v2))\n # End maint Comp1 after \"E.add((v1, v2))\"\na = 2\nprint(sorted((_m_Comp6_out[a] if (a in _m_Comp6_out) else set())))" }, { "alpha_fraction": 0.46543341875076294, "alphanum_fraction": 0.47637051343917847, "avg_line_length": 37.57291793823242, "blob_id": "df2f27ccaf773293b827959cf79d2ce2d95f2b81", "content_id": "5e101d65ab44c3df59261646894a416228d1a7a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7406, "license_type": "no_license", "max_line_length": 74, "num_lines": 192, "path": "/incoq/tests/invinc/comp/test_join.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Unit tests for join.py.\"\"\"\n\n\nimport unittest\n\nimport incoq.compiler.incast as L\nfrom incoq.compiler.comp.clause import (EnumClause, CondClause,\n ClauseFactory as CF)\nfrom incoq.compiler.comp.join import *\n\n\nclass Jincoqase(unittest.TestCase):\n \n def make_join(self, source, delta=None):\n join = Join.from_comp(L.pe(\n 'COMP({{... {}}}, [], {{}})'.format(source)),\n CF)\n join = join._replace(delta=delta)\n return join\n \n def test_basic(self):\n cl1 = EnumClause(('a', 'b'), 'R')\n cl2 = EnumClause(('b', 'c'), 'S')\n cl3 = CondClause(L.pe('a != c'))\n join = Join([cl1, cl2, cl3], CF, None)\n \n # AST round-trip.\n comp = join.to_comp({})\n exp_comp = L.Comp(L.pe('(a, b, c)'),\n (cl1.to_AST(), cl2.to_AST(), cl3.to_AST()),\n (), {})\n self.assertEqual(comp, exp_comp)\n join2 = Join.from_comp(exp_comp, CF)\n self.assertEqual(join, join2)\n \n # Attributes.\n self.assertEqual(join.enumvars, ('a', 'b', 'c'))\n self.assertEqual(join.vars, ('a', 'b', 'c'))\n self.assertEqual(join.rels, ('R', 'S'))\n self.assertTrue(join.robust)\n self.assertEqual(join.has_wildcards, False)\n self.assertIs(join.delta, None)\n \n # Rewriting/prefixing.\n \n cl1a = EnumClause(('z', 'b'), 'R')\n cl3a = CondClause(L.pe('z != c'))\n join2 = join.rewrite_subst({'a': 'z'})\n self.assertEqual(join2, Join([cl1a, cl2, cl3a], CF, None))\n \n cl1b = EnumClause(('_a', '_b'), 'R')\n cl2b = EnumClause(('_b', '_c'), 'S')\n cl3b = CondClause(L.pe('_a != _c'))\n join3 = join.prefix_enumvars('_')\n self.assertEqual(join3, Join([cl1b, cl2b, cl3b], CF, None))\n \n def test_elimeq(self):\n # Basic.\n join = self.make_join('for (a, b) in R for (b, c) in R if a == b')\n join, subst = join.elim_equalities([])\n exp_join = self.make_join('for (a, a) in R for (a, c) in R')\n exp_subst = {'a': 'a', 'b': 'a'}\n self.assertEqual(join, exp_join)\n self.assertEqual(subst, exp_subst)\n \n # Keepvars.\n join = self.make_join('for (p1, p2, v1, v2, v3) in S '\n 'if p1 == p2 if p2 == v1 if v2 == v3')\n join, subst = join.elim_equalities(['p1', 'p2'])\n exp_join = self.make_join('for (p1, p2, p2, v2, v2) in S '\n 'if p1 == p2')\n exp_subst = {'p2': 'p2', 'v1': 'p2', 'v2': 'v2', 'v3': 'v2'}\n self.assertEqual(join, exp_join)\n self.assertEqual(subst, exp_subst)\n \n # Convert membership condition clauses to enumerators.\n join = self.make_join('for (x, y) in S if x in T')\n join, subst = join.elim_equalities([])\n exp_join = self.make_join('for (x, y) in S for x in T')\n exp_subst = {}\n self.assertEqual(join, exp_join)\n self.assertEqual(subst, exp_subst)\n \n def test_makewild(self):\n join = self.make_join(\n 'for (a, a, b, c, d, e) in R for b in S if c')\n join = join.make_wildcards(['d'])\n exp_join = self.make_join(\n 'for (a, a, b, c, d, _) in R for b in S if c')\n 
self.assertEqual(join, exp_join)\n \n def test_makeeq(self):\n join = self.make_join(\n 'for (a, a) in R for (a, b, c) in S '\n 'for (a, b) in T if f(a) == g(b)')\n join = join.make_equalities(['b'])\n \n exp_join = self.make_join(\n 'for (a, a_2) in R if a == a_2 '\n 'for (a_3, b_2, c) in S if a_2 == a_3 if b == b_2 '\n 'if (a, b) in T if f(a) == g(b)')\n \n self.assertEqual(join, exp_join)\n \n def test_elimwild(self):\n join = self.make_join('for _ in R for _ in S')\n join = join.elim_wildcards()\n exp_join = self.make_join('for _v1 in R for _v2 in S')\n self.assertEqual(join, exp_join)\n \n def test_maintjoins(self):\n join = self.make_join('for (a, b) in R for (b, c) in R')\n \n # Disjoint, subtractive.\n mjoins = join.get_maint_joins(L.pe('e'), 'R', 'add', '',\n disjoint_strat='sub')\n exp_mjoin1 = self.make_join('''\n for (a, b) in deltamatch(R, \"bb\", e, 1)\n for (b, c) in R - {e}''',\n DeltaInfo('R', L.pe('e'), ('a', 'b'), 'add'))\n exp_mjoin2 = self.make_join('''\n for (a, b) in R\n for (b, c) in deltamatch(R, \"bb\", e, 1)''',\n DeltaInfo('R', L.pe('e'), ('b', 'c'), 'add'))\n self.assertSequenceEqual(mjoins, [exp_mjoin1, exp_mjoin2])\n \n # Disjoint, augmented.\n mjoins = join.get_maint_joins(L.pe('e'), 'R', 'add', '',\n disjoint_strat='aug')\n exp_mjoin1 = self.make_join('''\n for (a, b) in deltamatch(R, \"bb\", e, 0)\n for (b, c) in R + {e}''',\n DeltaInfo('R', L.pe('e'), ('a', 'b'), 'add'))\n exp_mjoin2 = self.make_join('''\n for (a, b) in R\n for (b, c) in deltamatch(R, \"bb\", e, 0)''',\n DeltaInfo('R', L.pe('e'), ('b', 'c'), 'add'))\n self.assertSequenceEqual(mjoins, [exp_mjoin1, exp_mjoin2])\n \n # Not disjoint. With prefix.\n mjoins = join.get_maint_joins(L.pe('e'), 'R', 'add', '_',\n disjoint_strat='das')\n exp_mjoin1 = self.make_join('''\n for (_a, _b) in deltamatch(R, \"bb\", e, 1)\n for (_b, _c) in R''',\n DeltaInfo('R', L.pe('e'), ('_a', '_b'), 'add'))\n exp_mjoin2 = self.make_join('''\n for (_a, _b) in R\n for (_b, _c) in deltamatch(R, \"bb\", e, 1)''',\n DeltaInfo('R', L.pe('e'), ('_b', '_c'), 'add'))\n self.assertSequenceEqual(mjoins, [exp_mjoin1, exp_mjoin2])\n \n def test_memberconds(self):\n # Make sure nothing strange happens when converting between\n # membership conditions and enumerators, when the enumerators\n # aren't ordinary EnumClauses.\n \n # Round-trip SubClause.\n \n orig_join = self.make_join('for (x, y) in S if x in T - {e}')\n join, _subst = orig_join.elim_equalities([])\n exp_join = self.make_join('for (x, y) in S for x in T - {e}')\n self.assertEqual(join, exp_join)\n \n join = exp_join.make_equalities([])\n self.assertEqual(join, orig_join)\n \n # Round-trip SingletonClause.\n \n orig_join = self.make_join('for (x, y) in S if x in {e}')\n join, _subst = orig_join.elim_equalities([])\n exp_join = self.make_join('for (x, y) in S for x in {e}')\n self.assertEqual(join, exp_join)\n \n join = exp_join.make_equalities([])\n self.assertEqual(join, orig_join)\n \n def test_code(self):\n join = self.make_join('for (a, b) in R for (b, c) in S')\n code = join.get_code(['c'], L.pc('pass'), augmented=False)\n \n exp_code = L.pc('''\n for b in setmatch(S, 'ub', c):\n for a in setmatch(R, 'ub', b):\n pass\n ''')\n \n self.assertEqual(code, exp_code)\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.4825737178325653, "alphanum_fraction": 0.51474529504776, "avg_line_length": 17.649999618530273, "blob_id": "91cc304ed86118178d6eac9632affe22673663d6", "content_id": "4d54521782277982afc5a9719ffe01d258709467", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 373, "license_type": "no_license", "max_line_length": 56, "num_lines": 20, "path": "/incoq/tests/programs/deminc/tup/objnest_in.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "# Nested tuples in membership clauses, in object domain.\n\nfrom incoq.runtime import *\n\nOPTIONS(\n obj_domain = True,\n)\n\ns1 = Set()\nfor i in [1, 2, 3]:\n s1.add(((i, 2*i), 3*i))\n s1.add(((2*i, 3*i), 4*i))\n\nQUERYOPTIONS(\n '{x for ((x, y), z) in s if x + y == z}',\n params = ['s'],\n impl = 'dem',\n)\ns = s1\nprint(sorted({x for ((x, y), z) in s if x + y == z}))\n" }, { "alpha_fraction": 0.5349794030189514, "alphanum_fraction": 0.5452674627304077, "avg_line_length": 14.677419662475586, "blob_id": "97583cb63f75e62957eb1d33af686e752db89386", "content_id": "5b9632ff919b1337e7fd28ab5a4ee0a850d301ee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 486, "license_type": "no_license", "max_line_length": 58, "num_lines": 31, "path": "/incoq/tests/programs/objcomp/expr_in.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "# Comprehensions with result expressions that are not just\n# simple variables.\n\nfrom incoq.runtime import *\n\nOPTIONS(\n obj_domain = True,\n)\n\nQUERYOPTIONS(\n '{o.i + 1 for o in s}',\n params = ['s'],\n uset_mode = 'none',\n impl = 'inc',\n)\n\nQUERYOPTIONS(\n '{None for o in s}',\n params = ['s'],\n uset_mode = 'none',\n impl = 'inc',\n)\n\ns = Set()\nfor i in [1, 2, 3]:\n o = Obj()\n o.i = i\n s.add(o)\n\nprint(sorted({o.i + 1 for o in s}))\nprint({None for o in s})\n" }, { "alpha_fraction": 0.45679011940956116, "alphanum_fraction": 0.5111111402511597, "avg_line_length": 29.037036895751953, "blob_id": "c5f4ec08576e922e3351d9a12c36558860d7b17c", "content_id": "cd39f6db08560deda64c9cc1562e42711562192b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 810, "license_type": "no_license", "max_line_length": 75, "num_lines": 27, "path": "/incoq/tests/programs/comp/unhandled_out.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from incoq.runtime import *\n# Comp1 := {(x, y) : x in N, y in N}\nComp1 = RCSet()\ndef _maint_Comp1_N_add(_e):\n v1_DAS = set()\n # Iterate {(v1_x, v1_y) : v1_x in deltamatch(N, 'b', _e, 1), v1_y in N}\n v1_x = _e\n for v1_y in N:\n if ((v1_x, v1_y) not in v1_DAS):\n v1_DAS.add((v1_x, v1_y))\n # Iterate {(v1_x, v1_y) : v1_x in N, v1_y in deltamatch(N, 'b', _e, 1)}\n v1_y = _e\n for v1_x in N:\n if ((v1_x, v1_y) not in v1_DAS):\n v1_DAS.add((v1_x, v1_y))\n for (v1_x, v1_y) in v1_DAS:\n Comp1.add((v1_x, v1_y))\n del v1_DAS\n\nN = Set()\nfor i in range(3):\n N.add(i)\n # Begin maint Comp1 after \"N.add(i)\"\n _maint_Comp1_N_add(i)\n # End maint Comp1 after \"N.add(i)\"\nprint(sorted(Comp1))\nprint(sorted({(x, y) for x in range(3) for y in range(3)}))" }, { "alpha_fraction": 0.5091185569763184, "alphanum_fraction": 0.5395137071609497, "avg_line_length": 15.84615421295166, "blob_id": "18b49aa5f5d391f7aea6ade244a14d5205e34640", "content_id": "e01911fcb9bc5074c208bf455806104c0ebaeeec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 658, "license_type": "no_license", "max_line_length": 71, "num_lines": 39, "path": "/incoq/tests/programs/comp/macroupdate_in.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "# Make sure macro updates are handled.\n\nfrom 
incoq.runtime import *\n\nQUERYOPTIONS(\n '{x for (x, y) in E if f(y)}',\n impl = 'inc',\n)\nQUERYOPTIONS(\n '{(x, z) for (x, y) in E for (y2, z) in E if y == y2}',\n impl = 'inc',\n)\n\ndef f(y):\n return True\n\nE = Set()\nR = Set()\nT = Set()\nV = Set()\n\nfor v1, v2 in [(1, 2), (1, 3), (2, 3), (3, 4)]:\n R.add((v1, v2))\n\nT.add((3, 4))\nV.add((5, 5))\n\ndef query():\n print(sorted({x for (x, y) in E if f(y)}))\n print(sorted({(x, z) for (x, y) in E for (y2, z) in E if y == y2}))\n\nE.update(R)\nquery()\nE.difference_update(T)\nquery()\nE.symmetric_difference_update(V)\nquery()\nE.intersection_update(V)\nquery()\n\n" }, { "alpha_fraction": 0.41048210859298706, "alphanum_fraction": 0.4780956208705902, "avg_line_length": 38.0625, "blob_id": "29a3a2e3cab6cefe5ffd6b8e8f70e6ed173c0f21", "content_id": "f6a666852353d52ab36b4dc57b60c068de9b2f26", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4999, "license_type": "no_license", "max_line_length": 120, "num_lines": 128, "path": "/incoq/tests/programs/comp/nested/obj_out.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from incoq.runtime import *\n# Comp1 := {(s, x) : (s, x) in _M, (x, x_a) in _F_a, (x_a > 1)}\n# Comp6 := {(s, y_b) : (s, y) in Comp1, (y, y_b) in _F_b}\n_m_Comp6_out = Map()\ndef _maint__m_Comp6_out_add(_e):\n (v13_1, v13_2) = _e\n if (v13_1 not in _m_Comp6_out):\n _m_Comp6_out[v13_1] = set()\n _m_Comp6_out[v13_1].add(v13_2)\n\ndef _maint__m_Comp6_out_remove(_e):\n (v14_1, v14_2) = _e\n _m_Comp6_out[v14_1].remove(v14_2)\n if (len(_m_Comp6_out[v14_1]) == 0):\n del _m_Comp6_out[v14_1]\n\n_m__M_in = Map()\ndef _maint__m__M_in_add(_e):\n (v11_1, v11_2) = _e\n if (v11_2 not in _m__M_in):\n _m__M_in[v11_2] = set()\n _m__M_in[v11_2].add(v11_1)\n\n_m_Comp1_in = Map()\ndef _maint__m_Comp1_in_add(_e):\n (v9_1, v9_2) = _e\n if (v9_2 not in _m_Comp1_in):\n _m_Comp1_in[v9_2] = set()\n _m_Comp1_in[v9_2].add(v9_1)\n\ndef _maint__m_Comp1_in_remove(_e):\n (v10_1, v10_2) = _e\n _m_Comp1_in[v10_2].remove(v10_1)\n if (len(_m_Comp1_in[v10_2]) == 0):\n del _m_Comp1_in[v10_2]\n\nComp6 = RCSet()\ndef _maint_Comp6_Comp1_add(_e):\n # Iterate {(v5_s, v5_y, v5_y_b) : (v5_s, v5_y) in deltamatch(Comp1, 'bb', _e, 1), (v5_y, v5_y_b) in _F_b}\n (v5_s, v5_y) = _e\n if hasattr(v5_y, 'b'):\n v5_y_b = v5_y.b\n if ((v5_s, v5_y_b) not in Comp6):\n Comp6.add((v5_s, v5_y_b))\n # Begin maint _m_Comp6_out after \"Comp6.add((v5_s, v5_y_b))\"\n _maint__m_Comp6_out_add((v5_s, v5_y_b))\n # End maint _m_Comp6_out after \"Comp6.add((v5_s, v5_y_b))\"\n else:\n Comp6.incref((v5_s, v5_y_b))\n\ndef _maint_Comp6_Comp1_remove(_e):\n # Iterate {(v6_s, v6_y, v6_y_b) : (v6_s, v6_y) in deltamatch(Comp1, 'bb', _e, 1), (v6_y, v6_y_b) in _F_b}\n (v6_s, v6_y) = _e\n if hasattr(v6_y, 'b'):\n v6_y_b = v6_y.b\n if (Comp6.getref((v6_s, v6_y_b)) == 1):\n # Begin maint _m_Comp6_out before \"Comp6.remove((v6_s, v6_y_b))\"\n _maint__m_Comp6_out_remove((v6_s, v6_y_b))\n # End maint _m_Comp6_out before \"Comp6.remove((v6_s, v6_y_b))\"\n Comp6.remove((v6_s, v6_y_b))\n else:\n Comp6.decref((v6_s, v6_y_b))\n\ndef _maint_Comp6__F_b_add(_e):\n # Iterate {(v7_s, v7_y, v7_y_b) : (v7_s, v7_y) in Comp1, (v7_y, v7_y_b) in deltamatch(_F_b, 'bb', _e, 1)}\n (v7_y, v7_y_b) = _e\n for v7_s in (_m_Comp1_in[v7_y] if (v7_y in _m_Comp1_in) else set()):\n if ((v7_s, v7_y_b) not in Comp6):\n Comp6.add((v7_s, v7_y_b))\n # Begin maint _m_Comp6_out after \"Comp6.add((v7_s, v7_y_b))\"\n _maint__m_Comp6_out_add((v7_s, v7_y_b))\n # End maint 
_m_Comp6_out after \"Comp6.add((v7_s, v7_y_b))\"\n else:\n Comp6.incref((v7_s, v7_y_b))\n\nComp1 = RCSet()\ndef _maint_Comp1__M_add(_e):\n # Iterate {(v1_s, v1_x, v1_x_a) : (v1_s, v1_x) in deltamatch(_M, 'bb', _e, 1), (v1_x, v1_x_a) in _F_a, (v1_x_a > 1)}\n (v1_s, v1_x) = _e\n if hasattr(v1_x, 'a'):\n v1_x_a = v1_x.a\n if (v1_x_a > 1):\n if ((v1_s, v1_x) not in Comp1):\n Comp1.add((v1_s, v1_x))\n # Begin maint _m_Comp1_in after \"Comp1.add((v1_s, v1_x))\"\n _maint__m_Comp1_in_add((v1_s, v1_x))\n # End maint _m_Comp1_in after \"Comp1.add((v1_s, v1_x))\"\n # Begin maint Comp6 after \"Comp1.add((v1_s, v1_x))\"\n _maint_Comp6_Comp1_add((v1_s, v1_x))\n # End maint Comp6 after \"Comp1.add((v1_s, v1_x))\"\n else:\n Comp1.incref((v1_s, v1_x))\n\ndef _maint_Comp1__F_a_add(_e):\n # Iterate {(v3_s, v3_x, v3_x_a) : (v3_s, v3_x) in _M, (v3_x, v3_x_a) in deltamatch(_F_a, 'bb', _e, 1), (v3_x_a > 1)}\n (v3_x, v3_x_a) = _e\n if (v3_x_a > 1):\n for v3_s in (_m__M_in[v3_x] if (v3_x in _m__M_in) else set()):\n if ((v3_s, v3_x) not in Comp1):\n Comp1.add((v3_s, v3_x))\n # Begin maint _m_Comp1_in after \"Comp1.add((v3_s, v3_x))\"\n _maint__m_Comp1_in_add((v3_s, v3_x))\n # End maint _m_Comp1_in after \"Comp1.add((v3_s, v3_x))\"\n # Begin maint Comp6 after \"Comp1.add((v3_s, v3_x))\"\n _maint_Comp6_Comp1_add((v3_s, v3_x))\n # End maint Comp6 after \"Comp1.add((v3_s, v3_x))\"\n else:\n Comp1.incref((v3_s, v3_x))\n\ns = Set()\nfor i in [1, 2, 3]:\n o = Obj()\n o.a = i\n # Begin maint Comp1 after \"_F_a.add((o, i))\"\n _maint_Comp1__F_a_add((o, i))\n # End maint Comp1 after \"_F_a.add((o, i))\"\n o.b = (i * 2)\n # Begin maint Comp6 after \"_F_b.add((o, (i * 2)))\"\n _maint_Comp6__F_b_add((o, (i * 2)))\n # End maint Comp6 after \"_F_b.add((o, (i * 2)))\"\n s.add(o)\n # Begin maint _m__M_in after \"_M.add((s, o))\"\n _maint__m__M_in_add((s, o))\n # End maint _m__M_in after \"_M.add((s, o))\"\n # Begin maint Comp1 after \"_M.add((s, o))\"\n _maint_Comp1__M_add((s, o))\n # End maint Comp1 after \"_M.add((s, o))\"\nprint(sorted((_m_Comp6_out[s] if (s in _m_Comp6_out) else set())))" }, { "alpha_fraction": 0.43105700612068176, "alphanum_fraction": 0.5097998976707458, "avg_line_length": 41.39649963378906, "blob_id": "1f475f81f9f2bc631d6122f6118aa7b52e05bf91", "content_id": "9574d73a89148485d922b2df2fd003e6521a6cb6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14541, "license_type": "no_license", "max_line_length": 174, "num_lines": 343, "path": "/incoq/tests/programs/deminc/aug2_out.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from incoq.runtime import *\n# Comp1 := {z : _ in _U_Comp1, (x, x) in E, (x, y) in E, (y, z) in S}\n# Comp1_Tx1 := {x : (x, x) in E}\n# Comp1_dE2 := {(x, y) : x in Comp1_Tx1, (x, y) in E}\n# Comp1_Ty1 := {y : (x, y) in Comp1_dE2}\n# Comp1_dS := {(y, z) : y in Comp1_Ty1, (y, z) in S}\n_m_E_b1 = Map()\ndef _maint__m_E_b1_add(_e):\n (v31_1, v31_2) = _e\n if ((v31_1 == v31_2)):\n if (v31_1 not in _m_E_b1):\n _m_E_b1[v31_1] = set()\n _m_E_b1[v31_1].add(())\n\n_m_Comp1_dS_out = Map()\ndef _maint__m_Comp1_dS_out_add(_e):\n (v29_1, v29_2) = _e\n if (v29_1 not in _m_Comp1_dS_out):\n _m_Comp1_dS_out[v29_1] = set()\n _m_Comp1_dS_out[v29_1].add(v29_2)\n\ndef _maint__m_Comp1_dS_out_remove(_e):\n (v30_1, v30_2) = _e\n _m_Comp1_dS_out[v30_1].remove(v30_2)\n if (len(_m_Comp1_dS_out[v30_1]) == 0):\n del _m_Comp1_dS_out[v30_1]\n\n_m_Comp1_dE2_out = Map()\ndef _maint__m_Comp1_dE2_out_add(_e):\n (v27_1, v27_2) = _e\n if (v27_1 not in 
_m_Comp1_dE2_out):\n _m_Comp1_dE2_out[v27_1] = set()\n _m_Comp1_dE2_out[v27_1].add(v27_2)\n\ndef _maint__m_Comp1_dE2_out_remove(_e):\n (v28_1, v28_2) = _e\n _m_Comp1_dE2_out[v28_1].remove(v28_2)\n if (len(_m_Comp1_dE2_out[v28_1]) == 0):\n del _m_Comp1_dE2_out[v28_1]\n\n_m_E_u1 = Map()\ndef _maint__m_E_u1_add(_e):\n (v25_1, v25_2) = _e\n if ((v25_1 == v25_2)):\n if (() not in _m_E_u1):\n _m_E_u1[()] = set()\n _m_E_u1[()].add(v25_1)\n\n_m__U_Comp1_w = Map()\ndef _maint__m__U_Comp1_w_add(_e):\n if (() not in _m__U_Comp1_w):\n _m__U_Comp1_w[()] = RCSet()\n if (() not in _m__U_Comp1_w[()]):\n _m__U_Comp1_w[()].add(())\n else:\n _m__U_Comp1_w[()].incref(())\n\ndef _maint__m__U_Comp1_w_remove(_e):\n if (_m__U_Comp1_w[()].getref(()) == 1):\n _m__U_Comp1_w[()].remove(())\n else:\n _m__U_Comp1_w[()].decref(())\n if (len(_m__U_Comp1_w[()]) == 0):\n del _m__U_Comp1_w[()]\n\n_m_E_out = Map()\ndef _maint__m_E_out_add(_e):\n (v21_1, v21_2) = _e\n if (v21_1 not in _m_E_out):\n _m_E_out[v21_1] = set()\n _m_E_out[v21_1].add(v21_2)\n\n_m_S_out = Map()\ndef _maint__m_S_out_add(_e):\n (v19_1, v19_2) = _e\n if (v19_1 not in _m_S_out):\n _m_S_out[v19_1] = set()\n _m_S_out[v19_1].add(v19_2)\n\ndef _maint__m_S_out_remove(_e):\n (v20_1, v20_2) = _e\n _m_S_out[v20_1].remove(v20_2)\n if (len(_m_S_out[v20_1]) == 0):\n del _m_S_out[v20_1]\n\nComp1_dS = RCSet()\ndef _maint_Comp1_dS_Comp1_Ty1_add(_e):\n # Iterate {(v15_y, v15_z) : v15_y in deltamatch(Comp1_Ty1, 'b', _e, 0), (v15_y, v15_z) in S}\n v15_y = _e\n for v15_z in (_m_S_out[v15_y] if (v15_y in _m_S_out) else set()):\n Comp1_dS.add((v15_y, v15_z))\n # Begin maint _m_Comp1_dS_out after \"Comp1_dS.add((v15_y, v15_z))\"\n _maint__m_Comp1_dS_out_add((v15_y, v15_z))\n # End maint _m_Comp1_dS_out after \"Comp1_dS.add((v15_y, v15_z))\"\n\ndef _maint_Comp1_dS_Comp1_Ty1_remove(_e):\n # Iterate {(v16_y, v16_z) : v16_y in deltamatch(Comp1_Ty1, 'b', _e, 0), (v16_y, v16_z) in S}\n v16_y = _e\n for v16_z in (_m_S_out[v16_y] if (v16_y in _m_S_out) else set()):\n # Begin maint _m_Comp1_dS_out before \"Comp1_dS.remove((v16_y, v16_z))\"\n _maint__m_Comp1_dS_out_remove((v16_y, v16_z))\n # End maint _m_Comp1_dS_out before \"Comp1_dS.remove((v16_y, v16_z))\"\n Comp1_dS.remove((v16_y, v16_z))\n\ndef _maint_Comp1_dS_S_add(_e):\n # Iterate {(v17_y, v17_z) : v17_y in Comp1_Ty1, (v17_y, v17_z) in deltamatch(S, 'bb', _e, 0)}\n (v17_y, v17_z) = _e\n if (v17_y in Comp1_Ty1):\n Comp1_dS.add((v17_y, v17_z))\n # Begin maint _m_Comp1_dS_out after \"Comp1_dS.add((v17_y, v17_z))\"\n _maint__m_Comp1_dS_out_add((v17_y, v17_z))\n # End maint _m_Comp1_dS_out after \"Comp1_dS.add((v17_y, v17_z))\"\n\ndef _maint_Comp1_dS_S_remove(_e):\n # Iterate {(v18_y, v18_z) : v18_y in Comp1_Ty1, (v18_y, v18_z) in deltamatch(S, 'bb', _e, 0)}\n (v18_y, v18_z) = _e\n if (v18_y in Comp1_Ty1):\n # Begin maint _m_Comp1_dS_out before \"Comp1_dS.remove((v18_y, v18_z))\"\n _maint__m_Comp1_dS_out_remove((v18_y, v18_z))\n # End maint _m_Comp1_dS_out before \"Comp1_dS.remove((v18_y, v18_z))\"\n Comp1_dS.remove((v18_y, v18_z))\n\nComp1_Ty1 = RCSet()\ndef _maint_Comp1_Ty1_Comp1_dE2_add(_e):\n # Iterate {(v13_x, v13_y) : (v13_x, v13_y) in deltamatch(Comp1_dE2, 'bb', _e, 0)}\n (v13_x, v13_y) = _e\n if (v13_y not in Comp1_Ty1):\n # Begin maint Comp1_dS before \"Comp1_Ty1.add(v13_y)\"\n _maint_Comp1_dS_Comp1_Ty1_add(v13_y)\n # End maint Comp1_dS before \"Comp1_Ty1.add(v13_y)\"\n Comp1_Ty1.add(v13_y)\n else:\n Comp1_Ty1.incref(v13_y)\n\ndef _maint_Comp1_Ty1_Comp1_dE2_remove(_e):\n # Iterate {(v14_x, v14_y) : (v14_x, v14_y) in 
deltamatch(Comp1_dE2, 'bb', _e, 0)}\n (v14_x, v14_y) = _e\n if (Comp1_Ty1.getref(v14_y) == 1):\n Comp1_Ty1.remove(v14_y)\n # Begin maint Comp1_dS after \"Comp1_Ty1.remove(v14_y)\"\n _maint_Comp1_dS_Comp1_Ty1_remove(v14_y)\n # End maint Comp1_dS after \"Comp1_Ty1.remove(v14_y)\"\n else:\n Comp1_Ty1.decref(v14_y)\n\nComp1_dE2 = RCSet()\ndef _maint_Comp1_dE2_Comp1_Tx1_add(_e):\n # Iterate {(v9_x, v9_y) : v9_x in deltamatch(Comp1_Tx1, 'b', _e, 0), (v9_x, v9_y) in E}\n v9_x = _e\n for v9_y in (_m_E_out[v9_x] if (v9_x in _m_E_out) else set()):\n # Begin maint Comp1_Ty1 before \"Comp1_dE2.add((v9_x, v9_y))\"\n _maint_Comp1_Ty1_Comp1_dE2_add((v9_x, v9_y))\n # End maint Comp1_Ty1 before \"Comp1_dE2.add((v9_x, v9_y))\"\n Comp1_dE2.add((v9_x, v9_y))\n # Begin maint _m_Comp1_dE2_out after \"Comp1_dE2.add((v9_x, v9_y))\"\n _maint__m_Comp1_dE2_out_add((v9_x, v9_y))\n # End maint _m_Comp1_dE2_out after \"Comp1_dE2.add((v9_x, v9_y))\"\n\ndef _maint_Comp1_dE2_Comp1_Tx1_remove(_e):\n # Iterate {(v10_x, v10_y) : v10_x in deltamatch(Comp1_Tx1, 'b', _e, 0), (v10_x, v10_y) in E}\n v10_x = _e\n for v10_y in (_m_E_out[v10_x] if (v10_x in _m_E_out) else set()):\n # Begin maint _m_Comp1_dE2_out before \"Comp1_dE2.remove((v10_x, v10_y))\"\n _maint__m_Comp1_dE2_out_remove((v10_x, v10_y))\n # End maint _m_Comp1_dE2_out before \"Comp1_dE2.remove((v10_x, v10_y))\"\n Comp1_dE2.remove((v10_x, v10_y))\n # Begin maint Comp1_Ty1 after \"Comp1_dE2.remove((v10_x, v10_y))\"\n _maint_Comp1_Ty1_Comp1_dE2_remove((v10_x, v10_y))\n # End maint Comp1_Ty1 after \"Comp1_dE2.remove((v10_x, v10_y))\"\n\ndef _maint_Comp1_dE2_E_add(_e):\n # Iterate {(v11_x, v11_y) : v11_x in Comp1_Tx1, (v11_x, v11_y) in deltamatch(E, 'bb', _e, 0)}\n (v11_x, v11_y) = _e\n if (v11_x in Comp1_Tx1):\n # Begin maint Comp1_Ty1 before \"Comp1_dE2.add((v11_x, v11_y))\"\n _maint_Comp1_Ty1_Comp1_dE2_add((v11_x, v11_y))\n # End maint Comp1_Ty1 before \"Comp1_dE2.add((v11_x, v11_y))\"\n Comp1_dE2.add((v11_x, v11_y))\n # Begin maint _m_Comp1_dE2_out after \"Comp1_dE2.add((v11_x, v11_y))\"\n _maint__m_Comp1_dE2_out_add((v11_x, v11_y))\n # End maint _m_Comp1_dE2_out after \"Comp1_dE2.add((v11_x, v11_y))\"\n\nComp1_Tx1 = RCSet()\ndef _maint_Comp1_Tx1_E_add(_e):\n # Iterate {v7_x : (v7_x, v7_x) in deltamatch(E, 'b1', _e, 0)}\n for v7_x in setmatch({_e}, 'u1', ()):\n # Begin maint Comp1_dE2 before \"Comp1_Tx1.add(v7_x)\"\n _maint_Comp1_dE2_Comp1_Tx1_add(v7_x)\n # End maint Comp1_dE2 before \"Comp1_Tx1.add(v7_x)\"\n Comp1_Tx1.add(v7_x)\n\nComp1 = RCSet()\ndef _maint_Comp1__U_Comp1_add(_e):\n # Iterate {(v1_x, v1_y, v1_z) : _ in deltamatch(_U_Comp1, 'w', _e, 0), (v1_x, v1_x) in E, (v1_x, v1_y) in Comp1_dE2, (v1_y, v1_z) in Comp1_dS}\n for _ in setmatch(({_e} if ((_m__U_Comp1_w[()] if (() in _m__U_Comp1_w) else RCSet()).getref(()) == 0) else {}), 'w', ()):\n for v1_x in (_m_E_u1[()] if (() in _m_E_u1) else set()):\n for v1_y in (_m_Comp1_dE2_out[v1_x] if (v1_x in _m_Comp1_dE2_out) else set()):\n for v1_z in (_m_Comp1_dS_out[v1_y] if (v1_y in _m_Comp1_dS_out) else set()):\n if (v1_z not in Comp1):\n Comp1.add(v1_z)\n else:\n Comp1.incref(v1_z)\n\ndef _maint_Comp1__U_Comp1_remove(_e):\n # Iterate {(v2_x, v2_y, v2_z) : _ in deltamatch(_U_Comp1, 'w', _e, 0), (v2_x, v2_x) in E, (v2_x, v2_y) in Comp1_dE2, (v2_y, v2_z) in Comp1_dS}\n for _ in setmatch(({_e} if ((_m__U_Comp1_w[()] if (() in _m__U_Comp1_w) else RCSet()).getref(()) == 0) else {}), 'w', ()):\n for v2_x in (_m_E_u1[()] if (() in _m_E_u1) else set()):\n for v2_y in (_m_Comp1_dE2_out[v2_x] if (v2_x in _m_Comp1_dE2_out) else 
set()):\n for v2_z in (_m_Comp1_dS_out[v2_y] if (v2_y in _m_Comp1_dS_out) else set()):\n if (Comp1.getref(v2_z) == 1):\n Comp1.remove(v2_z)\n else:\n Comp1.decref(v2_z)\n\ndef _maint_Comp1_E_add(_e):\n # Iterate {(v3_x, v3_y, v3_z) : _ in _U_Comp1, (v3_x, v3_x) in deltamatch(E, 'b1', _e, 0), (v3_x, v3_y) in ((Comp1_dE2 - {_e}) + {_e}), (v3_y, v3_z) in Comp1_dS}\n for v3_x in setmatch({_e}, 'u1', ()):\n for v3_y in (_m_Comp1_dE2_out[v3_x] if (v3_x in _m_Comp1_dE2_out) else set()):\n if ((v3_x, v3_y) != _e):\n for v3_z in (_m_Comp1_dS_out[v3_y] if (v3_y in _m_Comp1_dS_out) else set()):\n for _ in (_m__U_Comp1_w[()] if (() in _m__U_Comp1_w) else RCSet()):\n if (v3_z not in Comp1):\n Comp1.add(v3_z)\n else:\n Comp1.incref(v3_z)\n for v3_y in setmatch({_e}, 'bu', v3_x):\n for v3_z in (_m_Comp1_dS_out[v3_y] if (v3_y in _m_Comp1_dS_out) else set()):\n for _ in (_m__U_Comp1_w[()] if (() in _m__U_Comp1_w) else RCSet()):\n if (v3_z not in Comp1):\n Comp1.add(v3_z)\n else:\n Comp1.incref(v3_z)\n # Iterate {(v3_x, v3_y, v3_z) : _ in _U_Comp1, (v3_x, v3_x) in E, (v3_x, v3_y) in deltamatch(Comp1_dE2, 'bb', _e, 0), (v3_x, v3_y) in Comp1_dE2, (v3_y, v3_z) in Comp1_dS}\n (v3_x, v3_y) = _e\n for _ in (_m_E_b1[v3_x] if (v3_x in _m_E_b1) else set()):\n if ((v3_x, v3_y) in Comp1_dE2):\n for v3_z in (_m_Comp1_dS_out[v3_y] if (v3_y in _m_Comp1_dS_out) else set()):\n for _ in (_m__U_Comp1_w[()] if (() in _m__U_Comp1_w) else RCSet()):\n if (v3_z not in Comp1):\n Comp1.add(v3_z)\n else:\n Comp1.incref(v3_z)\n\ndef _maint_Comp1_S_add(_e):\n # Iterate {(v5_x, v5_y, v5_z) : _ in _U_Comp1, (v5_x, v5_x) in E, (v5_x, v5_y) in E, (v5_y, v5_z) in deltamatch(Comp1_dS, 'bb', _e, 0), (v5_y, v5_z) in Comp1_dS}\n (v5_y, v5_z) = _e\n if ((v5_y, v5_z) in Comp1_dS):\n for v5_x in (_m_E_u1[()] if (() in _m_E_u1) else set()):\n if ((v5_x, v5_y) in E):\n for _ in (_m__U_Comp1_w[()] if (() in _m__U_Comp1_w) else RCSet()):\n if (v5_z not in Comp1):\n Comp1.add(v5_z)\n else:\n Comp1.incref(v5_z)\n\ndef _maint_Comp1_S_remove(_e):\n # Iterate {(v6_x, v6_y, v6_z) : _ in _U_Comp1, (v6_x, v6_x) in E, (v6_x, v6_y) in E, (v6_y, v6_z) in deltamatch(Comp1_dS, 'bb', _e, 0), (v6_y, v6_z) in Comp1_dS}\n (v6_y, v6_z) = _e\n if ((v6_y, v6_z) in Comp1_dS):\n for v6_x in (_m_E_u1[()] if (() in _m_E_u1) else set()):\n if ((v6_x, v6_y) in E):\n for _ in (_m__U_Comp1_w[()] if (() in _m__U_Comp1_w) else RCSet()):\n if (Comp1.getref(v6_z) == 1):\n Comp1.remove(v6_z)\n else:\n Comp1.decref(v6_z)\n\n_U_Comp1 = RCSet()\n_UEXT_Comp1 = Set()\ndef demand_Comp1():\n '{z : _ in _U_Comp1, (x, x) in E, (x, y) in E, (y, z) in S}'\n if (() not in _U_Comp1):\n # Begin maint Comp1 before \"_U_Comp1.add(())\"\n _maint_Comp1__U_Comp1_add(())\n # End maint Comp1 before \"_U_Comp1.add(())\"\n _U_Comp1.add(())\n # Begin maint _m__U_Comp1_w after \"_U_Comp1.add(())\"\n _maint__m__U_Comp1_w_add(())\n # End maint _m__U_Comp1_w after \"_U_Comp1.add(())\"\n else:\n _U_Comp1.incref(())\n\ndef undemand_Comp1():\n '{z : _ in _U_Comp1, (x, x) in E, (x, y) in E, (y, z) in S}'\n if (_U_Comp1.getref(()) == 1):\n # Begin maint _m__U_Comp1_w before \"_U_Comp1.remove(())\"\n _maint__m__U_Comp1_w_remove(())\n # End maint _m__U_Comp1_w before \"_U_Comp1.remove(())\"\n _U_Comp1.remove(())\n # Begin maint Comp1 after \"_U_Comp1.remove(())\"\n _maint_Comp1__U_Comp1_remove(())\n # End maint Comp1 after \"_U_Comp1.remove(())\"\n else:\n _U_Comp1.decref(())\n\ndef query_Comp1():\n '{z : _ in _U_Comp1, (x, x) in E, (x, y) in E, (y, z) in S}'\n if (() not in _UEXT_Comp1):\n _UEXT_Comp1.add(())\n 
demand_Comp1()\n return True\n\nE = Set()\nprint(sorted((query_Comp1() and Comp1)))\n# Begin maint Comp1_dS before \"S.add((1, 2))\"\n_maint_Comp1_dS_S_add((1, 2))\n# End maint Comp1_dS before \"S.add((1, 2))\"\n# Begin maint Comp1 before \"S.add((1, 2))\"\n_maint_Comp1_S_add((1, 2))\n# End maint Comp1 before \"S.add((1, 2))\"\n# Begin maint _m_S_out after \"S.add((1, 2))\"\n_maint__m_S_out_add((1, 2))\n# End maint _m_S_out after \"S.add((1, 2))\"\n# Begin maint Comp1_Tx1 before \"E.add((1, 1))\"\n_maint_Comp1_Tx1_E_add((1, 1))\n# End maint Comp1_Tx1 before \"E.add((1, 1))\"\n# Begin maint Comp1_dE2 before \"E.add((1, 1))\"\n_maint_Comp1_dE2_E_add((1, 1))\n# End maint Comp1_dE2 before \"E.add((1, 1))\"\n# Begin maint Comp1 before \"E.add((1, 1))\"\n_maint_Comp1_E_add((1, 1))\n# End maint Comp1 before \"E.add((1, 1))\"\nE.add((1, 1))\n# Begin maint _m_E_b1 after \"E.add((1, 1))\"\n_maint__m_E_b1_add((1, 1))\n# End maint _m_E_b1 after \"E.add((1, 1))\"\n# Begin maint _m_E_u1 after \"E.add((1, 1))\"\n_maint__m_E_u1_add((1, 1))\n# End maint _m_E_u1 after \"E.add((1, 1))\"\n# Begin maint _m_E_out after \"E.add((1, 1))\"\n_maint__m_E_out_add((1, 1))\n# End maint _m_E_out after \"E.add((1, 1))\"\nprint(sorted((query_Comp1() and Comp1)))\n# Begin maint _m_S_out before \"S.remove((1, 2))\"\n_maint__m_S_out_remove((1, 2))\n# End maint _m_S_out before \"S.remove((1, 2))\"\n# Begin maint Comp1 after \"S.remove((1, 2))\"\n_maint_Comp1_S_remove((1, 2))\n# End maint Comp1 after \"S.remove((1, 2))\"\n# Begin maint Comp1_dS after \"S.remove((1, 2))\"\n_maint_Comp1_dS_S_remove((1, 2))\n# End maint Comp1_dS after \"S.remove((1, 2))\"\nprint(sorted((query_Comp1() and Comp1)))" }, { "alpha_fraction": 0.6299405097961426, "alphanum_fraction": 0.6320391893386841, "avg_line_length": 30.076086044311523, "blob_id": "918a2cc0ddd139782906a9bbb76aa6be953a90d6", "content_id": "0fc7f2d08ca91ffefde343f2ec113418bd262cbd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2859, "license_type": "no_license", "max_line_length": 68, "num_lines": 92, "path": "/experiments/distalgo/recompile.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Recompile Distalgo programs.\"\"\"\n\nimport sys\nimport os\nfrom os.path import dirname, join\nfrom multiprocessing import Process\nfrom configparser import ConfigParser\nfrom shutil import copy\n\nimport da\n\n\ndef get_benchmark_path():\n \"\"\"Return the path to the distalgo benchmarks directory.\n Assume current directory is the one containing this file.\n \"\"\"\n config = ConfigParser()\n config.read('../config.txt')\n dapath = config['python']['DISTALGO_PATH']\n return join(dapath, 'benchmarks')\n\n\ndef compile(dafile, pyfile, incfile):\n \"\"\"Compile the input dafile to two output files: the main module\n pyfile and the incrementalization interface incfile.\n \"\"\"\n sys.argv = [\n sys.argv[0],\n '-o', pyfile,\n '-i', '-m', incfile,\n '--jb-style',\n '--no-table3', '--no-table4',\n dafile\n ]\n \n # Use a separate subprocess because the distalgo compiler\n # doesn't like being called multiple times from the same\n # process.\n p = Process(target=da.compiler.main)\n p.start()\n p.join()\n\ndef do_tasks(tasks):\n \"\"\"Perform several compilation steps. For each task, copy over\n the .da file from the distalgo benchmarks directory to the local\n directory, and do the compilation. 
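Compilation produces both the main module and the incrementalization interface module for each task. 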
Also copy the controller.da\n file to the local directory.\n \n Each task is a pair of the input filename minus the .da suffix,\n relative to the distalgo benchmarks directory; and the output\n file prefix minus the .py or _inc_in.py suffix, relative to the\n local directory.\n \"\"\"\n mydir = dirname(__file__)\n # Handle case where current dir is the same dir containing\n # this file.\n mydir = join('.', mydir)\n os.chdir(mydir)\n benchpath = get_benchmark_path()\n \n for inpath, outpath in tasks:\n os.makedirs(dirname(outpath), exist_ok=True)\n orig_dafile = join(benchpath, '{}.da'.format(inpath))\n copy(orig_dafile, '{}.da'.format(outpath))\n compile('{}.da'.format(outpath),\n '{}.py'.format(outpath),\n '{}_inc_in.py'.format(outpath))\n \n copy(join(benchpath, 'controller.da'), 'controller.da')\n\n\ntasks = [\n# ('clpaxos/spec', 'clpaxos/clpaxos'),\n# ('crleader/orig', 'crleader/crleader'),\n# ('dscrash/spec', 'dscrash/dscrash'),\n# ('hsleader/spec', 'hsleader/hsleader'),\n# \n# ('lamutex/spec_unopt_relack', 'lamutex/lamutex'),\n# ('lamutex/spec_unopt_ack', 'lamutex/lamutex_opt1'),\n# ('lamutex/spec', 'lamutex/lamutex_opt2'),\n# ('lamutex/orig', 'lamutex/lamutex_orig'),\n# \n# ('lapaxos/orig', 'lapaxos/lapaxos'),\n# ('ramutex/spec', 'ramutex/ramutex'),\n# ('ratoken/spec', 'ratoken/ratoken'),\n# ('sktoken/orig', 'sktoken/sktoken'),\n# ('2pcommit/spec', 'tpcommit/tpcommit'),\n# ('vrpaxos/orig', 'vrpaxos/vrpaxos'),\n]\n\nif __name__ == '__main__':\n do_tasks(tasks)\n" }, { "alpha_fraction": 0.6077319383621216, "alphanum_fraction": 0.610162615776062, "avg_line_length": 29.528268814086914, "blob_id": "693d7db6c92808b07f50b0fbd4c863f9895c6935", "content_id": "de52bb31335b92741bbec37130715a8bcccfae30", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17279, "license_type": "no_license", "max_line_length": 78, "num_lines": 566, "path": "/incoq/compiler/cost/cost.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Definitions of cost terms and framework for algebraic manipulation.\"\"\"\n\n\n__all__ = [\n 'Cost',\n 'UnknownCost',\n 'UnitCost',\n 'NameCost',\n 'IndefImgsetCost',\n 'DefImgsetCost',\n 'ProductCost',\n 'SumCost',\n 'MinCost',\n \n 'eval_coststr',\n \n 'BaseCostVisitor',\n 'CostVisitor',\n 'CostTransformer',\n \n 'PrettyPrinter',\n 'CostSubstitutor',\n 'ImgkeySubstitutor',\n 'Simplifier',\n 'normalize',\n 'lteq',\n]\n\nfrom itertools import product, chain, groupby\nfrom collections import Counter\n\nfrom simplestruct import Struct, TypedField\n\nfrom incoq.util.collections import OrderedSet\nfrom incoq.compiler.set import Mask\n\n\nclass Cost(Struct):\n \"\"\"An asymptotic cost term.\"\"\"\n\nclass UnknownCost(Cost):\n \n def __str__(self):\n return '?'\n\nclass UnitCost(Cost):\n \n def __str__(self):\n return '1'\n\nclass NameCost(Cost):\n name = TypedField(str)\n \n def __str__(self):\n return self.name\n\nclass IndefImgsetCost(Cost):\n rel = TypedField(str)\n mask = TypedField(Mask)\n \n def __str__(self):\n return self.rel + '_' + str(self.mask)\n\nclass DefImgsetCost(Cost):\n rel = TypedField(str)\n mask = TypedField(Mask)\n key = TypedField(str, seq=True)\n \n def __str__(self):\n return '{}_{}[{}]'.format(self.rel, self.mask,\n ', '.join(self.key))\n \n def to_indef(self):\n \"\"\"Return the indefinite image set cost that generalizes\n this cost.\n \"\"\"\n return IndefImgsetCost(self.rel, self.mask)\n\nclass ProductCost(Cost):\n \n terms = TypedField(Cost, seq=True)\n \n @classmethod\n def 
from_products(cls, costs):\n \"\"\"Form as the concatenation of other ProductCosts.\"\"\"\n assert all(isinstance(c, ProductCost) for c in costs)\n return ProductCost(tuple(chain.from_iterable(c.terms for c in costs)))\n \n def __str__(self):\n return '(' + '*'.join(str(t) for t in self.terms) + ')'\n\nclass SumCost(Cost):\n \n terms = TypedField(Cost, seq=True)\n \n @classmethod\n def from_sums(cls, costs):\n \"\"\"Form as the concatenation of other SumCosts.\"\"\"\n assert all(isinstance(c, SumCost) for c in costs)\n return SumCost(tuple(chain.from_iterable(c.terms for c in costs)))\n \n def __str__(self):\n return '(' + ' + '.join(str(s) for s in self.terms) + ')'\n\nclass MinCost(Cost):\n \n terms = TypedField(Cost, seq=True)\n \n @classmethod\n def from_mins(cls, costs):\n \"\"\"Form as the concatenation of other MinCosts.\"\"\"\n assert all(isinstance(c, MinCost) for c in costs)\n return MinCost(tuple(chain.from_iterable(c.terms for c in costs)))\n \n def __str__(self):\n return 'min(' + ', '.join(str(s) for s in self.terms) + ')'\n\n\ndef eval_coststr(s):\n \"\"\"eval() a string representing a cost expression.\"\"\"\n ns = {k: v for k, v in globals().items()\n if isinstance(v, type) and issubclass(v, Cost)}\n return eval(s, ns)\n\n\nclass BaseCostVisitor:\n \n \"\"\"Visitor for costs, analogous to NodeVisitor.\"\"\"\n \n # We don't use a generic_visit() method because there are only\n # a few types of Cost nodes, and that would require a common\n # list of subterms a la '_fields' for AST nodes. Instead we give\n # visit_ handlers for each kind of cost.\n #\n # To avoid an accidental inconsistency if a new cost type is added\n # but a new handler is not defined, we separate BaseCostVisitor\n # from CostVisitor so that CostTransformer is forced to explicitly\n # provide its own handlers.\n \n @classmethod\n def run(cls, tree, *args, **kargs):\n visitor = cls(*args, **kargs)\n result = visitor.process(tree)\n return result\n \n def process(self, tree):\n result = self.visit(tree)\n return result\n \n def visit(self, cost):\n assert isinstance(cost, Cost)\n method = 'visit_' + cost.__class__.__name__\n visitor = getattr(self, method)\n result = visitor(cost)\n return result\n\nclass CostVisitor(BaseCostVisitor):\n \n def do_nothing(self, cost):\n return\n \n visit_UnknownCost = do_nothing\n visit_UnitCost = do_nothing\n visit_NameCost = do_nothing\n visit_IndefImgsetCost = do_nothing\n visit_DefImgsetCost = do_nothing\n \n def do_termlist(self, cost):\n for c in cost.terms:\n self.visit(c)\n \n visit_ProductCost = do_termlist\n visit_SumCost = do_termlist\n visit_MinCost = do_termlist\n\nclass CostTransformer(BaseCostVisitor):\n \n \"\"\"Transformer for costs. 
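Each visit_ handler may return a replacement for the cost it receives. 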
Like NodeTransformer, use None to indicate\n no change, and in a sequence return () to indicate removal of this\n term.\n \"\"\"\n \n def visit(self, cost):\n result = super().visit(cost)\n if result is None:\n result = cost\n return result\n \n def do_nothing(self, cost):\n return cost\n \n visit_UnknownCost = do_nothing\n visit_UnitCost = do_nothing\n visit_NameCost = do_nothing\n visit_IndefImgsetCost = do_nothing\n visit_DefImgsetCost = do_nothing\n \n def do_termlist(self, cost):\n changed = False\n new_terms = []\n \n for c in cost.terms:\n result = self.visit(c)\n if result is not c:\n changed = True\n if isinstance(result, (tuple, list)):\n new_terms.extend(result)\n else:\n new_terms.append(result)\n \n if changed:\n return cost._replace(terms=new_terms)\n else:\n return cost\n \n visit_ProductCost = do_termlist\n visit_SumCost = do_termlist\n visit_MinCost = do_termlist\n\n\nclass PrettyPrinter(CostVisitor):\n \n def helper(self, cost):\n return str(cost)\n \n visit_UnknownCost = helper\n visit_UnitCost = helper\n visit_NameCost = helper\n visit_IndefImgsetCost = helper\n visit_DefImgsetCost = helper\n \n def visit_ProductCost(self, cost):\n termstrs = []\n \n # Sort terms by string representation first, then group.\n # Each repetition of the same term is coalesced into a power.\n terms = list(cost.terms)\n terms.sort(key=lambda t: str(t).lower())\n for key, group in groupby(terms):\n n = len(list(group))\n s = self.visit(key)\n if n == 1:\n termstrs.append(s)\n else:\n termstrs.append(s + '^' + str(n))\n \n return '(' + '*'.join(termstrs) + ')'\n \n def visit_SumCost(self, cost):\n return '(' + ' + '.join(self.visit(t) for t in cost.terms) + ')'\n \n def visit_MinCost(self, cost):\n return 'min(' + ', '.join(self.visit(t) for t in cost.terms) + ')'\n\n\nclass CostSubstitutor(CostTransformer):\n \n \"\"\"Apply a substitution to replace some costs with others.\n Mainly intended to help simplify costs by replacing name or\n image set costs with unit costs or other names.\n \n If subsume_maps is True, indefinite image set costs in the\n substitution map will also match their definite image set cost\n versions.\n \"\"\"\n \n def __init__(self, subst, *, subsume_maps=False):\n super().__init__()\n self.subst = subst\n self.subsume_maps = subsume_maps\n \n def visit(self, cost):\n if cost in self.subst:\n return self.subst[cost]\n else:\n return super().visit(cost)\n \n def visit_DefImgsetCost(self, cost):\n if self.subsume_maps:\n indef = cost.to_indef()\n if indef in self.subst:\n return self.subst[indef]\n return cost\n\n\nclass ImgkeySubstitutor(CostTransformer):\n \n \"\"\"Apply a substitution to imageset keys. 
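The substitution maps key variable names to replacement names. 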
Use None in the mapping\n to replace definite imagesets with indefinite ones.\n \"\"\"\n \n def __init__(self, subst):\n super().__init__()\n self.subst = subst\n \n def visit_DefImgsetCost(self, cost):\n new_key = tuple(self.subst.get(k, k) for k in cost.key)\n if None not in new_key:\n return cost._replace(key=new_key)\n else:\n return cost.to_indef()\n\n\ndef without_duplicates(cost):\n \"\"\"For a ProductCost, SumCost, or MinCost, return a version without\n repeated terms among the direct arguments.\n \"\"\"\n assert isinstance(cost, (ProductCost, SumCost, MinCost))\n new_terms = OrderedSet(cost.terms)\n return cost._replace(terms=new_terms)\n\nclass Simplifier(CostTransformer):\n \n \"\"\"Return an algebraically equivalent cost tree that is either\n simpler than this one or the same as this one.\n \n For sum and min cost terms, rewrite them to eliminate duplicate\n entries. For product and sum, eliminate unit cost entries.\n \n If unwrap is True, unwrap any costs that have only one subterm,\n or replace them with the unit cost if they have zero subterms.\n \"\"\"\n \n def __init__(self, unwrap=True):\n super().__init__()\n self.unwrap = unwrap\n \n def recurse_helper(self, cost):\n terms = [self.visit(t) for t in cost.terms]\n return cost._replace(terms=terms)\n \n def unique_helper(self, cost):\n terms = OrderedSet(cost.terms)\n return cost._replace(terms=terms)\n \n def elimunit_helper(self, cost):\n terms = [t for t in cost.terms if t != UnitCost()]\n return cost._replace(terms=terms)\n \n def unwrap_helper(self, cost):\n if self.unwrap:\n if len(cost.terms) == 0:\n return UnitCost()\n elif len(cost.terms) == 1:\n return cost.terms[0]\n return cost\n \n def visit_ProductCost(self, cost):\n cost = self.recurse_helper(cost)\n cost = self.elimunit_helper(cost)\n cost = self.unwrap_helper(cost)\n return cost\n \n def visit_SumCost(self, cost):\n cost = self.recurse_helper(cost)\n cost = self.unique_helper(cost)\n cost = self.elimunit_helper(cost)\n cost = self.unwrap_helper(cost)\n return cost\n \n def visit_MinCost(self, cost):\n cost = self.recurse_helper(cost)\n cost = self.unique_helper(cost)\n cost = self.unwrap_helper(cost)\n return cost\n\n\ndef build_factor_counts(prods):\n \"\"\"Given a list of product costs, build a map from each product\n to a counter. 
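Duplicate products in the input share a single entry. 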
The counter itself is a map from each factor to\n    the number of times it appears in that product.\n    \"\"\"\n    assert all(isinstance(p, ProductCost) for p in prods)\n    result = {}\n    for p in prods:\n        # If the same one is seen twice, its previous entry will just\n        # be overwritten with the same information.\n        result[p] = Counter(p.terms)\n    return result\n\ndef all_products_dominated(prods1, prods2, factorcounts=None):\n    \"\"\"Return True if for every product cost in prods1, there is some\n    product cost in prods2 that dominates it.\n    \n    If factorcounts is not given, it will be computed from scratch.\n    \"\"\"\n    allprods = list(chain(prods1, prods2))\n    assert all(isinstance(p, ProductCost) for p in allprods)\n    \n    if factorcounts is None:\n        factorcounts = build_factor_counts(allprods)\n    \n    # Do a pairwise comparison.\n    for p1 in prods1:\n        c1 = factorcounts[p1]\n        for p2 in prods2:\n            c2 = factorcounts[p2]\n            # p1 is dominated if for each of its factors,\n            # p2 has at least that many occurrences of the factor.\n            if all(isinstance(f, UnitCost) or c2[f] >= c1[f]\n                   for f in c1.keys()):\n                break\n        else:\n            return False\n    return True\n\ndef all_sums_of_products_dominate(sums1, sums2, factorcounts=None):\n    \"\"\"Return True if for every sum-of-products cost in sums1, there\n    is some sum-of-products cost in sums2 that it dominates. Note that\n    the \"dominate\" order is the opposite of that in the above function.\n    \n    If factorcounts is not given, it will be computed from scratch.\n    \"\"\"\n    allsums = list(chain(sums1, sums2))\n    assert all(isinstance(s, SumCost) for s in allsums)\n    assert all(isinstance(p, ProductCost)\n               for s in allsums for p in s.terms)\n    \n    if factorcounts is None:\n        factorcounts = build_factor_counts(\n            list(chain.from_iterable(s.terms for s in allsums)))\n    \n    # Do a pairwise comparison.\n    for s1 in sums1:\n        for s2 in sums2:\n            if all_products_dominated(s2.terms, s1.terms):\n                break\n        else:\n            return False\n    return True\n\ndef simplify_sum_of_products(sumcost):\n    \"\"\"For a sum of products, return a version of this cost where\n    products that are dominated by other products are removed.\n    \"\"\"\n    assert isinstance(sumcost, SumCost)\n    assert all(isinstance(p, ProductCost) for p in sumcost.terms)\n    \n    # A naive approach only keeps terms that are not dominated by any\n    # other term. This would incorrectly remove two terms that are\n    # dominated only by each other. Once a term is dominated, we remove\n    # it from the set so it can't be used to dominate anything else.\n    \n    terms = list(OrderedSet(sumcost.terms))\n    factorcounts = build_factor_counts(terms)\n    \n    # Go right-to-left so that we keep the left occurrence of distinct\n    # tied terms. 
(Non-distinct tied terms are eliminated as duplicates\n # above.)\n for prod in reversed(list(terms)):\n rest = OrderedSet(terms) - {prod}\n if all_products_dominated([prod], rest, factorcounts):\n terms.remove(prod)\n \n return sumcost._replace(terms=terms)\n\ndef simplify_min_of_sums(mincost):\n \"\"\"For a min of sums, return a version of this cost where\n sums that dominate other sums are removed.\n \"\"\"\n assert isinstance(mincost, MinCost)\n terms = mincost.terms\n assert all(isinstance(s, SumCost)\n for s in terms)\n assert all(isinstance(p, ProductCost)\n for s in terms for p in s.terms)\n \n terms = list(OrderedSet(mincost.terms))\n factorcounts = build_factor_counts([p for s in terms for p in s.terms])\n \n for sum1 in reversed(list(terms)):\n rest = OrderedSet(terms) - {sum1}\n for sum2 in rest:\n if all_products_dominated(sum2.terms, sum1.terms, factorcounts):\n terms.remove(sum1)\n break\n \n return mincost._replace(terms=terms)\n\ndef multiply_sums_of_products(sums):\n \"\"\"Given a list of sums of products, produce their overall product\n in sum-of-product form.\n \"\"\"\n assert all(isinstance(s, SumCost)\n for s in sums)\n assert all(isinstance(p, ProductCost)\n for s in sums for p in s.terms)\n \n product_lists = [s.terms for s in sums]\n new_terms = []\n for comb in product(*product_lists):\n nt = ProductCost.from_products(comb)\n new_terms.append(nt)\n return SumCost(new_terms)\n\nclass Normalizer(CostTransformer):\n \n \"\"\"Produces a cost in normalized form, where normalized means that\n it is a min of sums of products.\n \"\"\"\n \n # Each recursive call returns a normalized cost. The visitors\n # combine normalized costs together.\n #\n # We simplify each complex term before returning it, in hopes\n # of avoiding an explosion in the size of intermediate terms.\n \n def wrapper_helper(self, cost):\n return MinCost((SumCost((ProductCost((cost,)),)),))\n \n visit_UnknownCost = wrapper_helper\n visit_UnitCost = wrapper_helper\n visit_NameCost = wrapper_helper\n visit_IndefImgsetCost = wrapper_helper\n visit_DefImgsetCost = wrapper_helper\n \n def visit_ProductCost(self, cost):\n cost = super().visit_ProductCost(cost)\n \n sum_lists = [m.terms for m in cost.terms]\n new_terms = []\n for comb in product(*sum_lists):\n term = multiply_sums_of_products(comb)\n term = simplify_sum_of_products(term)\n new_terms.append(term)\n \n cost = MinCost(new_terms)\n cost = Simplifier.run(cost, unwrap=False)\n cost = simplify_min_of_sums(cost)\n return cost\n \n def visit_SumCost(self, cost):\n cost = super().visit_SumCost(cost)\n \n sum_lists = [m.terms for m in cost.terms]\n new_terms = []\n for comb in product(*sum_lists):\n term = SumCost.from_sums(comb)\n term = simplify_sum_of_products(term)\n new_terms.append(term)\n \n cost = MinCost(new_terms)\n cost = Simplifier.run(cost, unwrap=False)\n cost = simplify_min_of_sums(cost)\n return cost\n \n def visit_MinCost(self, cost):\n cost = super().visit_MinCost(cost)\n cost = MinCost.from_mins(cost.terms)\n cost = Simplifier.run(cost, unwrap=False)\n cost = simplify_min_of_sums(cost)\n return cost\n\ndef normalize(cost):\n \"\"\"Normalize and simplify a cost.\"\"\"\n cost = Normalizer.run(cost)\n cost = Simplifier.run(cost)\n return cost\n\ndef lteq(left, right):\n \"\"\"Return True if the left cost is equal to or dominated by\n the right cost. 
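Both sides are normalized to min-of-sums-of-products form before the comparison. 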
This is a partial order over costs.\n \"\"\"\n left = Normalizer.run(left)\n right = Normalizer.run(right)\n return all_sums_of_products_dominate(right.terms, left.terms)\n" }, { "alpha_fraction": 0.7239263653755188, "alphanum_fraction": 0.7239263653755188, "avg_line_length": 19.375, "blob_id": "c9f79e4e49e0ce71c3c823a1d7d8dc15e7e680e2", "content_id": "19d1bd173a7617583ca0e68a2ed3dbc8208c5f5e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 163, "license_type": "no_license", "max_line_length": 53, "num_lines": 8, "path": "/incoq/compiler/tup/__init__.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Support for nested tuples in the object domain.\"\"\"\n\n\n# Exports.\nfrom .tuprel import *\nfrom .tupclause import *\nfrom .tupletrans import *\nfrom .flatten import *\n" }, { "alpha_fraction": 0.5256344079971313, "alphanum_fraction": 0.531330943107605, "avg_line_length": 32.2931022644043, "blob_id": "e0a500561bc316a4686e004f6e8e98c04c7a47ce", "content_id": "9b4d7846ce08f8386d14ceff53155e6f3b3e44ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1931, "license_type": "no_license", "max_line_length": 166, "num_lines": 58, "path": "/incoq/tests/invinc/incast/test_typeeval.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Unit tests for typeeval.py.\"\"\"\n\n\nimport unittest\n\nfrom incoq.compiler.incast.structconv import parse_structast\nfrom incoq.compiler.incast.nodeconv import IncLangImporter\nfrom incoq.compiler.incast.types import *\nfrom incoq.compiler.incast.types import add_fresh_typevars, subst_typevars\nfrom incoq.compiler.incast import ts_typed, trim\n\nfrom incoq.compiler.incast.typeeval import *\n\n\nclass ConstraintCase(unittest.TestCase):\n \n def p(self, source, subst=None, mode=None):\n return IncLangImporter.run(\n parse_structast(source, mode=mode, subst=subst))\n \n def test_program1(self):\n tree = self.p('''\n x, y = (1+2, True and False)\n (x, y)\n [1, 2, 'a']\n {x for x in S}\n ''')\n tree, store = analyze_types(tree, {'S': SetType(bottomtype)})\n source = ts_typed(tree)\n exp_source = trim('''\n (((x : Number), (y : bool)) : (Number, bool)) = (((((1 : Number) + (2 : Number)) : Number), (((True : bool) and (False : bool)) : bool)) : (Number, bool))\n (((x : Number), (y : bool)) : (Number, bool))\n ([(1 : Number), (2 : Number), ('a' : str)] : [Top])\n (COMP({(x : Number) for (x : Number) in (S : {Bottom})}, None, None) : {Number})\n ''')\n self.assertEqual(source, exp_source)\n self.assertEqual(store['x'], numbertype)\n \n def test_program2(self):\n tree = self.p('''\n S.add(x)\n ''')\n tree, store = analyze_types(tree, {'x': numbertype})\n self.assertEqual(store['S'], SetType(numbertype))\n \n def test_program3(self):\n tree = self.p('''\n S.add(T)\n T.add(S)\n ''')\n tree, store = analyze_types(tree)\n s = str(store['S'])\n exp_s = '{{{{{{{{{{Top}}}}}}}}}}'\n self.assertEqual(s, exp_s)\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.43853819370269775, "alphanum_fraction": 0.5199335813522339, "avg_line_length": 37.230159759521484, "blob_id": "b7d2b50990f692acb0be337bde4b947307299a64", "content_id": "86dafbb0e6a917f15e1f6809d0ac8f7a40987f85", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4816, "license_type": "no_license", "max_line_length": 133, "num_lines": 126, "path": 
"/incoq/tests/programs/deminc/reorder_out.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from incoq.runtime import *\n# Comp1 := {(z, x) : (x, y) in E, (y, z) in E}\n# Comp1_Ty2 := {y : (y, z) in E}\n# Comp1_dE1 := {(x, y) : y in Comp1_Ty2, (x, y) in E}\n_m_Comp1_out = Map()\ndef _maint__m_Comp1_out_add(_e):\n (v15_1, v15_2) = _e\n if (v15_1 not in _m_Comp1_out):\n _m_Comp1_out[v15_1] = set()\n _m_Comp1_out[v15_1].add(v15_2)\n\ndef _maint__m_Comp1_out_remove(_e):\n (v16_1, v16_2) = _e\n _m_Comp1_out[v16_1].remove(v16_2)\n if (len(_m_Comp1_out[v16_1]) == 0):\n del _m_Comp1_out[v16_1]\n\n_m_Comp1_dE1_in = Map()\ndef _maint__m_Comp1_dE1_in_add(_e):\n (v13_1, v13_2) = _e\n if (v13_2 not in _m_Comp1_dE1_in):\n _m_Comp1_dE1_in[v13_2] = set()\n _m_Comp1_dE1_in[v13_2].add(v13_1)\n\ndef _maint__m_Comp1_dE1_in_remove(_e):\n (v14_1, v14_2) = _e\n _m_Comp1_dE1_in[v14_2].remove(v14_1)\n if (len(_m_Comp1_dE1_in[v14_2]) == 0):\n del _m_Comp1_dE1_in[v14_2]\n\n_m_E_out = Map()\ndef _maint__m_E_out_add(_e):\n (v11_1, v11_2) = _e\n if (v11_1 not in _m_E_out):\n _m_E_out[v11_1] = set()\n _m_E_out[v11_1].add(v11_2)\n\n_m_E_in = Map()\ndef _maint__m_E_in_add(_e):\n (v9_1, v9_2) = _e\n if (v9_2 not in _m_E_in):\n _m_E_in[v9_2] = set()\n _m_E_in[v9_2].add(v9_1)\n\nComp1_dE1 = RCSet()\ndef _maint_Comp1_dE1_Comp1_Ty2_add(_e):\n # Iterate {(v5_y, v5_x) : v5_y in deltamatch(Comp1_Ty2, 'b', _e, 1), (v5_x, v5_y) in E}\n v5_y = _e\n for v5_x in (_m_E_in[v5_y] if (v5_y in _m_E_in) else set()):\n Comp1_dE1.add((v5_x, v5_y))\n # Begin maint _m_Comp1_dE1_in after \"Comp1_dE1.add((v5_x, v5_y))\"\n _maint__m_Comp1_dE1_in_add((v5_x, v5_y))\n # End maint _m_Comp1_dE1_in after \"Comp1_dE1.add((v5_x, v5_y))\"\n\ndef _maint_Comp1_dE1_Comp1_Ty2_remove(_e):\n # Iterate {(v6_y, v6_x) : v6_y in deltamatch(Comp1_Ty2, 'b', _e, 1), (v6_x, v6_y) in E}\n v6_y = _e\n for v6_x in (_m_E_in[v6_y] if (v6_y in _m_E_in) else set()):\n # Begin maint _m_Comp1_dE1_in before \"Comp1_dE1.remove((v6_x, v6_y))\"\n _maint__m_Comp1_dE1_in_remove((v6_x, v6_y))\n # End maint _m_Comp1_dE1_in before \"Comp1_dE1.remove((v6_x, v6_y))\"\n Comp1_dE1.remove((v6_x, v6_y))\n\ndef _maint_Comp1_dE1_E_add(_e):\n # Iterate {(v7_y, v7_x) : v7_y in Comp1_Ty2, (v7_x, v7_y) in deltamatch(E, 'bb', _e, 1)}\n (v7_x, v7_y) = _e\n if (v7_y in Comp1_Ty2):\n Comp1_dE1.add((v7_x, v7_y))\n # Begin maint _m_Comp1_dE1_in after \"Comp1_dE1.add((v7_x, v7_y))\"\n _maint__m_Comp1_dE1_in_add((v7_x, v7_y))\n # End maint _m_Comp1_dE1_in after \"Comp1_dE1.add((v7_x, v7_y))\"\n\nComp1_Ty2 = RCSet()\ndef _maint_Comp1_Ty2_E_add(_e):\n # Iterate {(v3_y, v3_z) : (v3_y, v3_z) in deltamatch(E, 'bb', _e, 1)}\n (v3_y, v3_z) = _e\n if (v3_y not in Comp1_Ty2):\n Comp1_Ty2.add(v3_y)\n # Begin maint Comp1_dE1 after \"Comp1_Ty2.add(v3_y)\"\n _maint_Comp1_dE1_Comp1_Ty2_add(v3_y)\n # End maint Comp1_dE1 after \"Comp1_Ty2.add(v3_y)\"\n else:\n Comp1_Ty2.incref(v3_y)\n\nComp1 = RCSet()\ndef _maint_Comp1_E_add(_e):\n v1_DAS = set()\n # Iterate {(v1_x, v1_y, v1_z) : (v1_x, v1_y) in deltamatch(Comp1_dE1, 'bb', _e, 1), (v1_x, v1_y) in Comp1_dE1, (v1_y, v1_z) in E}\n (v1_x, v1_y) = _e\n if ((v1_x, v1_y) in Comp1_dE1):\n for v1_z in (_m_E_out[v1_y] if (v1_y in _m_E_out) else set()):\n if ((v1_x, v1_y, v1_z) not in v1_DAS):\n v1_DAS.add((v1_x, v1_y, v1_z))\n # Iterate {(v1_x, v1_y, v1_z) : (v1_x, v1_y) in Comp1_dE1, (v1_y, v1_z) in deltamatch(E, 'bb', _e, 1)}\n (v1_y, v1_z) = _e\n for v1_x in (_m_Comp1_dE1_in[v1_y] if (v1_y in _m_Comp1_dE1_in) else set()):\n if ((v1_x, v1_y, v1_z) not in 
v1_DAS):\n v1_DAS.add((v1_x, v1_y, v1_z))\n for (v1_x, v1_y, v1_z) in v1_DAS:\n if ((v1_z, v1_x) not in Comp1):\n Comp1.add((v1_z, v1_x))\n # Begin maint _m_Comp1_out after \"Comp1.add((v1_z, v1_x))\"\n _maint__m_Comp1_out_add((v1_z, v1_x))\n # End maint _m_Comp1_out after \"Comp1.add((v1_z, v1_x))\"\n else:\n Comp1.incref((v1_z, v1_x))\n del v1_DAS\n\nfor (a, b) in {(1, 3), (2, 3), (3, 4)}:\n # Begin maint _m_E_out after \"E.add((a, b))\"\n _maint__m_E_out_add((a, b))\n # End maint _m_E_out after \"E.add((a, b))\"\n # Begin maint _m_E_in after \"E.add((a, b))\"\n _maint__m_E_in_add((a, b))\n # End maint _m_E_in after \"E.add((a, b))\"\n # Begin maint Comp1_dE1 after \"E.add((a, b))\"\n _maint_Comp1_dE1_E_add((a, b))\n # End maint Comp1_dE1 after \"E.add((a, b))\"\n # Begin maint Comp1_Ty2 after \"E.add((a, b))\"\n _maint_Comp1_Ty2_E_add((a, b))\n # End maint Comp1_Ty2 after \"E.add((a, b))\"\n # Begin maint Comp1 after \"E.add((a, b))\"\n _maint_Comp1_E_add((a, b))\n # End maint Comp1 after \"E.add((a, b))\"\nz = 4\nprint(sorted((_m_Comp1_out[z] if (z in _m_Comp1_out) else set())))" }, { "alpha_fraction": 0.4759615361690521, "alphanum_fraction": 0.47961539030075073, "avg_line_length": 32.440513610839844, "blob_id": "60ccd61aa15678aa6d7463c865259f151abbc23d", "content_id": "0c6fc9731cbf0013ce92e6a7d79ba8f88cccd881", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10400, "license_type": "no_license", "max_line_length": 71, "num_lines": 311, "path": "/incoq/tests/invinc/comp/test_clause.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Unit tests for clause.py.\"\"\"\n\n\nimport unittest\n\nimport incoq.compiler.incast as L\nfrom incoq.compiler.comp import Rate\nfrom incoq.compiler.comp.clause import *\n\n\nclass DummyFactory(ClauseFactory):\n @classmethod\n def from_AST(cls, node):\n return EnumClause.from_AST(node, cls)\n\n\nclass ClauseCase(unittest.TestCase):\n \n def test_subst(self):\n res = apply_subst_tuple(('a', 'b', 'c'),\n {'a': 'z', 'b': lambda s: s * 2})\n exp_res = ('z', 'bb', 'c')\n self.assertEqual(res, exp_res)\n \n def test_inst_wildcards(self):\n vars = ('a', '_', 'b', '_')\n vars = inst_wildcards(vars)\n exp_vars = ('a', '_v1', 'b', '_v2')\n self.assertEqual(vars, exp_vars)\n \n def test_enumclause_basic(self):\n cl = EnumClause(('x', 'y', 'x', '_'), 'R')\n \n # From expression.\n cl2 = EnumClause.from_expr(L.pe('(x, y, x, _) in R'))\n self.assertEqual(cl2, cl)\n \n # AST round-trip.\n clast = cl.to_AST()\n exp_clast = \\\n L.Enumerator(L.tuplify(['x', 'y', 'x', '_'], lval=True),\n L.ln('R'))\n self.assertEqual(clast, exp_clast)\n cl2 = EnumClause.from_AST(exp_clast, DummyFactory)\n self.assertEqual(cl2, cl)\n \n # Attributes.\n \n self.assertFalse(cl.isdelta)\n \n self.assertEqual(cl.enumlhs, ('x', 'y', 'x', '_'))\n self.assertEqual(cl.enumvars, ('x', 'y'))\n self.assertEqual(cl.pat_mask, (True, True, True, True))\n self.assertEqual(cl.enumrel, 'R')\n self.assertTrue(cl.has_wildcards)\n \n self.assertEqual(cl.vars, ('x', 'y'))\n self.assertEqual(cl.eqvars, None)\n \n self.assertTrue(cl.robust)\n self.assertEqual(cl.demname, None)\n self.assertEqual(cl.demparams, ())\n \n def test_enumclause_manipulate(self):\n cl = EnumClause(('x', 'y', 'x', '_'), 'R')\n \n # rewrite_rel().\n cl3 = cl.rewrite_rel('S', ClauseFactory)\n exp_cl3 = EnumClause(('x', 'y', 'x', '_'), 'S')\n self.assertEqual(cl3, exp_cl3)\n \n def test_enumclause_code(self):\n cl = EnumClause(('x', 'y'), 'R')\n \n # 
fits_string().\n self.assertTrue(cl.fits_string(['x'], 'R_out'))\n \n # Rating.\n self.assertEqual(cl.rate(['x']), Rate.NORMAL)\n self.assertEqual(cl.rate(['x', 'y']), Rate.CONSTANT_MEMBERSHIP)\n self.assertEqual(cl.rate([]), Rate.NOTPREFERRED)\n \n # Code generation.\n code = cl.get_code(['x'], L.pc('pass'))\n exp_code = L.pc('''\n for y in setmatch(R, 'bu', x):\n pass\n ''')\n self.assertEqual(code, exp_code)\n \n def test_enumclause_setmatch(self):\n # Make sure we can convert clauses over setmatches.\n cl = EnumClause.from_AST(\n L.Enumerator(L.tuplify(['y'], lval=True),\n L.SetMatch(L.ln('R'), 'bu', L.ln('x'))),\n DummyFactory)\n exp_cl = EnumClause(('x', 'y'), 'R')\n self.assertEqual(cl, exp_cl)\n \n def test_subclause(self):\n cl = SubClause(EnumClause(('x', 'y'), 'R'), L.pe('e'))\n \n # AST round-trip.\n clast = cl.to_AST()\n exp_clast = L.Enumerator(L.tuplify(['x', 'y'], lval=True),\n L.pe('R - {e}'))\n self.assertEqual(clast, exp_clast)\n cl2 = SubClause.from_AST(exp_clast, DummyFactory)\n self.assertEqual(cl2, cl)\n \n # Attributes.\n self.assertEqual(cl.enumlhs, ('x', 'y'))\n self.assertFalse(cl.robust)\n \n # Code generation.\n code = cl.get_code([], L.pc('pass'))\n exp_code = L.pc('''\n for (x, y) in R:\n if (x, y) != e:\n pass\n ''')\n self.assertEqual(code, exp_code)\n \n def test_augclause(self):\n cl = AugClause(EnumClause(('x', 'y'), 'R'), L.pe('e'))\n \n # AST round-trip.\n clast = cl.to_AST()\n exp_clast = L.Enumerator(L.tuplify(['x', 'y'], lval=True),\n L.pe('R + {e}'))\n self.assertEqual(clast, exp_clast)\n cl2 = AugClause.from_AST(exp_clast, DummyFactory)\n self.assertEqual(cl2, cl)\n \n # Attributes.\n self.assertEqual(cl.enumlhs, ('x', 'y'))\n self.assertFalse(cl.robust)\n \n # Code generation.\n code = cl.get_code([], L.pc('pass'))\n exp_code = L.pc('''\n for (x, y) in R:\n pass\n (x, y) = e\n pass\n ''')\n self.assertEqual(code, exp_code)\n \n def test_lookupclause(self):\n cl = LookupClause(('x', 'y', 'z'), 'R')\n \n # AST round-trip.\n clast = cl.to_AST()\n sm = L.SMLookup(L.ln('R'), 'bbu', L.tuplify(['x', 'y']), None)\n exp_clast = L.Enumerator(L.sn('z'), L.Set((sm,)))\n self.assertEqual(clast, exp_clast)\n cl2 = LookupClause.from_AST(exp_clast, DummyFactory)\n self.assertEqual(cl2, cl)\n \n # Attributes.\n self.assertEqual(cl.enumvars, ('x', 'y', 'z'))\n \n # Rewriting.\n cl2 = cl.rewrite_subst({'x': 'xx', 'z': 'zz'}, DummyFactory)\n self.assertEqual(cl2, LookupClause(('xx', 'y', 'zz'), 'R'))\n \n # Rating.\n self.assertEqual(cl.rate(['x']), Rate.NORMAL)\n self.assertEqual(cl.rate(['x', 'y']), Rate.CONSTANT)\n \n def test_singletonclause(self):\n cl = SingletonClause(('x', 'y'), L.pe('e'))\n \n # From expression.\n cl2 = SingletonClause.from_expr(L.pe('(x, y) == e'))\n self.assertEqual(cl, cl2)\n \n # AST round-trip.\n clast = cl.to_AST()\n exp_clast = L.Enumerator(L.tuplify(['x', 'y'], lval=True),\n L.pe('{e}'))\n self.assertEqual(clast, exp_clast)\n cl2 = SingletonClause.from_AST(exp_clast, DummyFactory)\n self.assertEqual(cl2, cl)\n \n # Attributes.\n self.assertEqual(cl.enumvars, ('x', 'y'))\n \n # Code generation.\n code = cl.get_code([], L.pc('pass'))\n exp_code = L.pc('''\n x, y = e\n pass\n ''')\n self.assertEqual(code, exp_code)\n \n def test_deltaclause(self):\n cl = DeltaClause(('x', 'y'), 'R', L.pe('e'), 1)\n \n # AST round-trip.\n clast = cl.to_AST()\n exp_clast = L.Enumerator(L.tuplify(['x', 'y'], lval=True),\n L.pe('deltamatch(R, \"bb\", e, 1)'))\n self.assertEqual(clast, exp_clast)\n cl2 = DeltaClause.from_AST(exp_clast, DummyFactory)\n 
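# Parsing the AST back should reproduce the original clause.\n        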
self.assertEqual(cl2, cl)\n \n # Attributes.\n self.assertEqual(cl.rel, 'R')\n \n # Code generation, no fancy mask.\n code = cl.get_code([], L.pc('pass'))\n exp_code = L.pc('''\n x, y = e\n pass\n ''')\n self.assertEqual(code, exp_code)\n \n # Code generation, fancy mask.\n cl2 = DeltaClause(('x', 'x', '_'), 'R', L.pe('e'), 1)\n code = cl2.get_code([], L.pc('pass'))\n exp_code = L.pc('''\n for x in setmatch(deltamatch(R, 'b1w', e, 1), 'u1w', ()):\n pass\n ''')\n self.assertEqual(code, exp_code)\n \n def test_condclause(self):\n cl = CondClause(L.pe('f(a) or g(b)'))\n \n # AST round-trip.\n clast = cl.to_AST()\n exp_clast = L.pe('f(a) or g(b)')\n self.assertEqual(clast, exp_clast)\n cl2 = CondClause.from_AST(exp_clast, DummyFactory)\n self.assertEqual(cl2, cl)\n \n # fits_string().\n self.assertTrue(cl.fits_string(['a', 'b'], 'f(a) or g(b)'))\n \n # Attributes.\n self.assertEqual(cl.enumvars, ())\n self.assertEqual(cl.vars, ('a', 'b'))\n cl2 = CondClause(L.pe('a == b'))\n self.assertEqual(cl2.eqvars, ('a', 'b'))\n \n # Rating.\n self.assertEqual(cl.rate(['a', 'b']), Rate.CONSTANT)\n self.assertEqual(cl.rate(['a']), Rate.UNRUNNABLE)\n \n # Code generation.\n code = cl.get_code(['a', 'b'], L.pc('pass'))\n exp_code = L.pc('''\n if f(a) or g(b):\n pass\n ''')\n self.assertEqual(code, exp_code)\n \n def test_clausefactory(self):\n # Construct from AST.\n clast = L.Enumerator(L.tuplify(['x', 'y'], lval=True),\n L.pe('R - {e}'))\n cl = ClauseFactory.from_AST(clast)\n exp_cl = SubClause(EnumClause(('x', 'y'), 'R'), L.pe('e'))\n self.assertEqual(cl, exp_cl)\n \n # rewrite_subst().\n cl = SubClause(EnumClause(('x', 'y'), 'R'), L.pe('e'))\n cl = ClauseFactory.rewrite_subst(cl, {'x': 'z'})\n exp_cl = SubClause(EnumClause(('z', 'y'), 'R'), L.pe('e'))\n self.assertEqual(cl, exp_cl)\n \n # bind().\n cl = EnumClause(('x', 'y'), 'R')\n cl = ClauseFactory.bind(cl, L.pe('e'), augmented=False)\n exp_cl = DeltaClause(['x', 'y'], 'R', L.pe('e'), 1)\n self.assertEqual(cl, exp_cl)\n \n # subtract().\n cl = EnumClause(('x', 'y'), 'R')\n cl = ClauseFactory.subtract(cl, L.pe('e'))\n exp_cl = SubClause(EnumClause(('x', 'y'), 'R'), L.pe('e'))\n self.assertEqual(cl, exp_cl)\n \n # augment().\n cl = EnumClause(['x', 'y'], 'R')\n cl = ClauseFactory.augment(cl, L.pe('e'))\n exp_cl = AugClause(EnumClause(['x', 'y'], 'R'), L.pe('e'))\n self.assertEqual(cl, exp_cl)\n \n # rewrite_rel().\n cl = SubClause(EnumClause(('x', 'y'), 'R'), L.pe('e'))\n cl = ClauseFactory.rewrite_rel(cl, 'S')\n exp_cl = SubClause(EnumClause(('x', 'y'), 'S'), L.pe('e'))\n self.assertEqual(cl, exp_cl)\n \n # membercond_to_enum().\n cl = CondClause(L.pe('(x, y) in R'))\n cl = ClauseFactory.membercond_to_enum(cl)\n exp_cl = EnumClause(('x', 'y'), 'R')\n self.assertEqual(cl, exp_cl)\n \n # enum_to_membercond().\n cl = EnumClause(('x', 'y'), 'R')\n cl = ClauseFactory.enum_to_membercond(cl)\n exp_cl = CondClause(L.pe('(x, y) in R'))\n self.assertEqual(cl, exp_cl)\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.416481077671051, "alphanum_fraction": 0.47141796350479126, "avg_line_length": 31.095237731933594, "blob_id": "5c5296e1e6dd4fdddcad2b9e7bd1371af893d06d", "content_id": "41efb149228d2853725465dd04f715ec22b60b83", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1347, "license_type": "no_license", "max_line_length": 66, "num_lines": 42, "path": "/incoq/tests/programs/auxmap/degenerate_out.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from 
incoq.runtime import *\n_m_R_uu = Map()\ndef _maint__m_R_uu_add(_e):\n (v3_1, v3_2) = _e\n if (() not in _m_R_uu):\n _m_R_uu[()] = set()\n _m_R_uu[()].add((v3_1, v3_2))\n\ndef _maint__m_R_uu_remove(_e):\n (v4_1, v4_2) = _e\n _m_R_uu[()].remove((v4_1, v4_2))\n if (len(_m_R_uu[()]) == 0):\n del _m_R_uu[()]\n\n_m_R_bb = Map()\ndef _maint__m_R_bb_add(_e):\n (v1_1, v1_2) = _e\n if ((v1_1, v1_2) not in _m_R_bb):\n _m_R_bb[(v1_1, v1_2)] = set()\n _m_R_bb[(v1_1, v1_2)].add(())\n\ndef _maint__m_R_bb_remove(_e):\n (v2_1, v2_2) = _e\n _m_R_bb[(v2_1, v2_2)].remove(())\n if (len(_m_R_bb[(v2_1, v2_2)]) == 0):\n del _m_R_bb[(v2_1, v2_2)]\n\nfor (x, y) in [(1, 2), (1, 3), (2, 3), (1, 4)]:\n # Begin maint _m_R_uu after \"R.add((x, y))\"\n _maint__m_R_uu_add((x, y))\n # End maint _m_R_uu after \"R.add((x, y))\"\n # Begin maint _m_R_bb after \"R.add((x, y))\"\n _maint__m_R_bb_add((x, y))\n # End maint _m_R_bb after \"R.add((x, y))\"\n# Begin maint _m_R_bb before \"R.remove((1, 4))\"\n_maint__m_R_bb_remove((1, 4))\n# End maint _m_R_bb before \"R.remove((1, 4))\"\n# Begin maint _m_R_uu before \"R.remove((1, 4))\"\n_maint__m_R_uu_remove((1, 4))\n# End maint _m_R_uu before \"R.remove((1, 4))\"\nprint(sorted((_m_R_bb[(1, 2)] if ((1, 2) in _m_R_bb) else set())))\nprint(sorted((_m_R_uu[()] if (() in _m_R_uu) else set())))" }, { "alpha_fraction": 0.40656334161758423, "alphanum_fraction": 0.45670008659362793, "avg_line_length": 39.66666793823242, "blob_id": "8c8495d70a1e08ea28b85237d444f3e913d61b08", "content_id": "5fb30f547244423333739bfbe5e69307ad601b1e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1097, "license_type": "no_license", "max_line_length": 99, "num_lines": 27, "path": "/incoq/tests/programs/deminc/tup/basic_out.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from incoq.runtime import *\n_m_R_out = Map()\ndef _maint__m_R_out_add(_e):\n (v1_1, v1_2) = _e\n if (v1_1 not in _m_R_out):\n _m_R_out[v1_1] = set()\n _m_R_out[v1_1].add(v1_2)\n\ndef query_Comp1(a):\n 'a -> {e : (a, _tup1) in R, (_tup1, b2, _) in _TUP2, (b2, _tup2) in R, (_tup2, _, e) in _TUP2}'\n result = set()\n for _tup1 in (_m_R_out[a] if (a in _m_R_out) else set()):\n if (isinstance(_tup1, tuple) and (len(_tup1) == 2)):\n for b2 in setmatch({(_tup1, _tup1[0], _tup1[1])}, 'buw', _tup1):\n for _tup2 in (_m_R_out[b2] if (b2 in _m_R_out) else set()):\n if (isinstance(_tup2, tuple) and (len(_tup2) == 2)):\n for e in setmatch({(_tup2, _tup2[0], _tup2[1])}, 'bwu', _tup2):\n if (e not in result):\n result.add(e)\n return result\n\nfor (x, y) in [(1, (2, 3)), (2, (3, 4)), (3, (4, 5))]:\n # Begin maint _m_R_out after \"R.add((x, y))\"\n _maint__m_R_out_add((x, y))\n # End maint _m_R_out after \"R.add((x, y))\"\na = 1\nprint(sorted(query_Comp1(a)))" }, { "alpha_fraction": 0.4920634925365448, "alphanum_fraction": 0.5485008955001831, "avg_line_length": 24.772727966308594, "blob_id": "a24cdbf6b42d1e7f08a39e085d6e30c00c61f373", "content_id": "b86970a0634d7912c5d3ef37f787d5602ca1a4b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 567, "license_type": "no_license", "max_line_length": 78, "num_lines": 22, "path": "/incoq/tests/programs/comp/uset/lru_in.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "# LRU cache on a U-set.\n\nfrom incoq.runtime import *\n\nE = Set()\n\nfor v1, v2 in [(1, 2), (2, 3), (2, 4), (4, 5)]:\n E.add((v1, v2))\n\nQUERYOPTIONS(\n '{z for (x2, y) in E for (y2, z) in E if x == 
x2 if y == y2}',\n params = ['x'],\n uset_mode = 'all',\n uset_lru = 2,\n impl = 'inc',\n)\n\n# Tracing the execution of this loop, it should be the case that 1 and 2\n# get added, then 1 is pinged, then when 3 is added 2 is removed, then\n# 1 is pinged again.\nfor x in [1, 2, 1, 3, 1]:\n print(sorted({z for (x2, y) in E for (y2, z) in E if x == x2 if y == y2}))\n" }, { "alpha_fraction": 0.6537036895751953, "alphanum_fraction": 0.6537036895751953, "avg_line_length": 19, "blob_id": "c92b78721fafc81a86deb39845776e3a1428e3af", "content_id": "3f6a96d38befc91d58471161950412d3f6c63b46", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 540, "license_type": "no_license", "max_line_length": 95, "num_lines": 27, "path": "/experiments/wifi/wifi_osq.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "# Run using the OSQ system.\n\nfrom incoq.runtime import *\nfrom osq import query\n\ndef make_wifi(threshold):\n wifi = Obj()\n wifi.scan = Set()\n wifi.threshold = threshold\n return wifi\n\ndef make_ap(ssid, strength):\n ap = Obj()\n ap.ssid = ssid\n ap.strength = strength\n return ap\n\ndef add_ap(wifi, ap):\n wifi.scan.add(ap)\n\ndef remove_ap(wifi, ap):\n wifi.scan.remove(ap)\n\ndef do_query(wifi):\n return query('wifi -> {ap.ssid for ap in wifi.scan if ap.strength > wifi.threshold}', wifi)\n\ndo_query_nodemand = do_query\n" }, { "alpha_fraction": 0.7169811129570007, "alphanum_fraction": 0.7169811129570007, "avg_line_length": 16.66666603088379, "blob_id": "8ba9da9a18d346d8d837ce355f182fd35edde32d", "content_id": "78447c49e9007020e741bed33529e3e96c83dfb2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 53, "license_type": "no_license", "max_line_length": 28, "num_lines": 3, "path": "/experiments/wifi/wifi_orig.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from .wifi_in import *\n\ndo_query_nodemand = do_query\n" }, { "alpha_fraction": 0.4714104235172272, "alphanum_fraction": 0.4993646740913391, "avg_line_length": 33.260868072509766, "blob_id": "284462f581e6baa35d74b5228874f10a9d18d01f", "content_id": "c1228ec8c003aefa539d2dd4cdde24515344ddff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 787, "license_type": "no_license", "max_line_length": 66, "num_lines": 23, "path": "/incoq/tests/programs/comp/nested/outline_out.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from incoq.runtime import *\n# Comp1 := {(s, x) : (s, x) in _M, (x, x_a) in _F_a, (x_a > 1)}\n# Comp6 := {(s, y_b) : (s, y) in Comp1, (y, y_b) in _F_b}\n_m_Comp6_out = Map()\n_m__M_in = Map()\n_m_Comp1_in = Map()\nComp6 = RCSet()\nComp1 = RCSet()\ns = Set()\nfor i in [1, 2, 3]:\n o = Obj()\n o.a = i\n # Begin maint Comp1 after \"_F_a.add((o, i))\"\n # End maint Comp1 after \"_F_a.add((o, i))\"\n o.b = (i * 2)\n # Begin maint Comp6 after \"_F_b.add((o, (i * 2)))\"\n # End maint Comp6 after \"_F_b.add((o, (i * 2)))\"\n s.add(o)\n # Begin maint _m__M_in after \"_M.add((s, o))\"\n # End maint _m__M_in after \"_M.add((s, o))\"\n # Begin maint Comp1 after \"_M.add((s, o))\"\n # End maint Comp1 after \"_M.add((s, o))\"\nprint(sorted((_m_Comp6_out[s] if (s in _m_Comp6_out) else set())))" }, { "alpha_fraction": 0.574013352394104, "alphanum_fraction": 0.5754556059837341, "avg_line_length": 30.77916717529297, "blob_id": "bfc94a6c81ac2aa1a775aa7d7b0d002c4276be32", "content_id": "b1ba2d005ab519ef6df28b9fff0ccdd3958c5116", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7627, "license_type": "no_license", "max_line_length": 73, "num_lines": 240, "path": "/incoq/compiler/tup/flatten.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Flatten relations that have a nested tuple structure.\"\"\"\n\n\n__all__ = [\n 'flatten_relations',\n]\n\n\nimport incoq.compiler.incast as L\n\n\ndef tuptree_to_type(node):\n \"\"\"Given a tuple tree of variables, return the type structure.\"\"\"\n if isinstance(node, L.Tuple):\n return ('<T>',) + tuple(tuptree_to_type(elt)\n for elt in node.elts)\n elif isinstance(node, L.Name):\n return node.id\n else:\n assert()\n\n\ndef tuptype_leaves(tuptype):\n \"\"\"Given a tuple tree type (in the form of the domain types for\n relations), return a list of paths to leaf components, i.e.\n non-tuple components. Each path is itself a sequence of indices,\n such that subscripting with each index in turn brings us from the\n root tuple value to the leaf value.\n \"\"\"\n leaves = []\n \n def process(t, path):\n if isinstance(t, tuple) and t[0] == '<T>':\n for i, elt in enumerate(t[1:]):\n process(elt, path + (i,))\n else:\n leaves.append(path)\n \n process(tuptype, ())\n return leaves\n\n\ndef make_flattup_code(tuptype, in_node, out_node, tempvar):\n \"\"\"Given a tuple tree type, make code to take the tuple given by the\n expression in_node and store the flattened form in the tuple given\n by out_node.\n \"\"\"\n def leaf_to_expr(root, path):\n \"\"\"Turn a leaf path into a series of subscript expressions\n that obtain the leaf value from the root value.\n \"\"\"\n node = root\n for i in path:\n node = L.Subscript(node, L.Index(L.Num(i)), L.Load())\n return node\n \n leaves = tuptype_leaves(tuptype)\n code = ()\n \n # If in_expr is just a variable name, use it as is.\n # Otherwise store it in a temporary variable to avoid redundant\n # evaluation of in_expr.\n if isinstance(in_node, L.Name):\n root_node = in_node\n else:\n rootname = tempvar\n code += L.pc('''\n ROOT = IN_NODE\n ''', subst={'IN_NODE': in_node,\n 'ROOT': L.sn(rootname)})\n root_node = L.ln(rootname)\n \n flattuple_expr = L.Tuple(tuple(leaf_to_expr(root_node, leaf)\n for leaf in leaves),\n L.Load())\n code += L.pc('''\n OUT_NODE = FLATTUP\n ''', subst={'FLATTUP': flattuple_expr,\n 'OUT_NODE': out_node})\n \n return code\n\n\nclass UpdateFlattener(L.NodeTransformer):\n \n \"\"\"Rewrite updates to the given set to use the flattened form.\"\"\"\n \n def __init__(self, rel, tuptype, namegen):\n super().__init__()\n self.rel = rel\n self.tuptype = tuptype\n self.namegen = namegen\n \n def visit_SetUpdate(self, node):\n if not (isinstance(node.target, L.Name) and\n node.target.id == self.rel):\n return\n \n fresh = next(self.namegen)\n tvar = '_t' + fresh\n ftvar = '_ft' + fresh\n code = make_flattup_code(self.tuptype, node.elem,\n L.sn(ftvar), tvar)\n update = node._replace(elem=L.ln(ftvar))\n return code + (update,)\n\n\ndef get_clause_vars(enum_node, tuptype):\n \"\"\"Given a clause and tuple tree type, return a list of the\n variables or wildcards used on the tuple tree on the LHS.\n The LHS must exactly match the structure of the type.\n \"\"\"\n vars = []\n \n def process(node, t):\n if isinstance(t, tuple) and t[0] == '<T>':\n assert (isinstance(node, L.Tuple) and\n len(node.elts) == len(t) - 1)\n for n, t2 in zip(node.elts, t[1:]):\n process(n, t2)\n else:\n assert isinstance(node, L.Name)\n vars.append(node.id)\n \n process(enum_node.target, 
tuptype)\n return vars\n\n\nclass ClauseFlattener(L.NodeTransformer):\n \n \"\"\"Rewrite all clauses over the given relation to use the flattened\n form of its type. This requires that the tuple tree structure of\n the clauses exactly matches that of the type. In particular,\n variables that summarize tuple components in the type are not\n allowed.\n \"\"\"\n \n def __init__(self, rel, tuptype):\n super().__init__()\n self.rel = rel\n self.tuptype = tuptype\n \n def visit_Enumerator(self, node):\n node = self.generic_visit(node)\n \n if not (isinstance(node.iter, L.Name) and\n node.iter.id == self.rel):\n return\n \n vars = get_clause_vars(node, self.tuptype)\n new_lhs = L.tuplify(vars, lval=True)\n return node._replace(target=new_lhs)\n\n\nclass ReltypeGetter(L.NodeVisitor):\n \n \"\"\"Gets type information for a given relation based on enumerators\n over it. All the enumerators over it must have the exact same tuple\n structure. The type must not be a singleton tuple, but it can be\n a singular non-tuple value. If there are no enumerators over it,\n a singular non-tuple value type is returned.\n \"\"\"\n \n def __init__(self, rel):\n super().__init__()\n self.rel = rel\n \n def process(self, tree):\n self.tuptype = None\n super().process(tree)\n \n if self.tuptype is None:\n self.tuptype = '_'\n if isinstance(self.tuptype, tuple) and self.tuptype[0] == '<T>':\n assert len(self.tuptype) > 2, \\\n 'Type of {} is singleton tuple'.format(self.rel)\n return self.tuptype\n \n def visit_Enumerator(self, node):\n self.generic_visit(node)\n \n if not (isinstance(node.iter, L.Name) and\n node.iter.id == self.rel):\n return\n \n tuptype = tuptree_to_type(node.target)\n if self.tuptype is None:\n self.tuptype = tuptype\n else:\n assert self.tuptype == tuptype\n\n\ndef path_to_elttype(path, vartype):\n \"\"\"Given a path (sequence of tuple tree indices) and a vartype\n (as in the manager), return the type for that component or None.\n \"\"\"\n if not isinstance(vartype, L.SetType):\n return None\n \n def helper(path, vartype):\n next, *rest = path\n if not (isinstance(vartype, L.TupleType) and\n next < len(vartype.ets)):\n return None\n \n if len(rest) == 0:\n return vartype.ets[next]\n else:\n return helper(rest, vartype.ets[next])\n \n return helper(path, vartype.et)\n\n\ndef flatten_relations(tree, rels, manager):\n \"\"\"Return a modified tree where the structure of the given relations\n is flattened.\n \n This only works when the relation is only read via comprehension\n enumerators. 
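Updates to the relation are rewritten to add and remove the flattened tuples instead. 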
Each clause over the relation must use a tuple\n structure that exactly matches the relation's type information.\n \"\"\"\n for rel in rels:\n tuptype = ReltypeGetter.run(tree, rel)\n # Skip if tuptype is just a variable with no tuple.\n if not (isinstance(tuptype, tuple) and tuptype[0] == '<T>'):\n continue\n tree = ClauseFlattener.run(tree, rel, tuptype)\n tree = UpdateFlattener.run(tree, rel, tuptype, manager.namegen)\n \n # Update relation type in manager.\n orig_vartype = manager.vartypes.get(rel, None)\n if orig_vartype is not None:\n paths = tuptype_leaves(tuptype)\n elt_types = [path_to_elttype(p, orig_vartype) for p in paths]\n if None not in elt_types:\n manager.vartypes[rel] = L.SetType(L.TupleType(elt_types))\n else:\n del manager.vartypes[rel]\n \n return tree\n" }, { "alpha_fraction": 0.44488978385925293, "alphanum_fraction": 0.5070140361785889, "avg_line_length": 20.69565200805664, "blob_id": "65080ba4b79bf86bf61ed4f319387c5fb52f28d4", "content_id": "e3ad717d5e46a4e791f10a7ce1eaafd8d32ae20e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 499, "license_type": "no_license", "max_line_length": 70, "num_lines": 23, "path": "/incoq/tests/programs/comp/deltawild_in.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "# Check handling of deltas to enumerators with wildcards.\n\nfrom incoq.runtime import *\n\nQUERYOPTIONS(\n '{(x, w) for (x, y, z) in S for (z2, w) in T if z == z2}',\n impl = 'inc',\n)\n\nS = Set()\nT = Set()\n\nfor v1, v2 in [(2, 4), (3, 5)]:\n T.add((v1, v2))\n\nfor v1, v2, v3 in [(1, 1, 2), (1, 2, 2), (1, 2, 3)]:\n S.add((v1, v2, v3))\n\nprint(sorted({(x, w) for (x, y, z) in S for (z2, w) in T if z == z2}))\n\nT.remove((2, 4))\n\nprint(sorted({(x, w) for (x, y, z) in S for (z2, w) in T if z == z2}))\n" }, { "alpha_fraction": 0.5243145823478699, "alphanum_fraction": 0.5251043438911438, "avg_line_length": 28.942567825317383, "blob_id": "0c8033366ba1190a18144f08ab8c7683431cb7f3", "content_id": "d5a8a022be6f8290ac43b16def67b513a0d77213", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8863, "license_type": "no_license", "max_line_length": 74, "num_lines": 296, "path": "/incoq/compiler/set/auxmap.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Auxiliary maps over relations.\"\"\"\n\n# WISHLIST: Add an analysis to determine when it's safe to use the\n# update element components directly, and either avoid generating\n# the assignment to the v vars, or else remove it with a later\n# processing step.\n\n\n__all__ = [\n 'AuxmapMaintainer',\n 'inc_relmatch',\n 'RelmatchQueryFinder',\n 'DeltaMatchRewriter',\n 'inc_all_relmatch',\n]\n\n\nfrom incoq.util.collections import OrderedSet\nimport incoq.compiler.incast as L\n\nfrom .mask import Mask, AuxmapSpec\n\n\ndef get_relmatch(node):\n id, mask, key = L.get_namematch(node)\n return AuxmapSpec(id, Mask(mask)), key\n\nis_relmatch = L.is_namematch\n\ndef get_relsmlookup(node):\n id, mask, key = L.get_namesmlookup(node)\n return AuxmapSpec(id, Mask(mask)), key\n\nis_relsmlookup = L.is_namesmlookup\n\n\ndef make_vareq_cond(eqs):\n \"\"\"Given a list of pairs of variables, return a conjunction of\n equalities, one for each pair.\n \"\"\"\n eqcond = L.BoolOp(L.And(), tuple(L.cmpeq(L.ln(v1), L.ln(v2))\n for v1, v2 in eqs))\n return eqcond\n\n\ndef make_auxmap_maint_code(manager, spec, elem, addremove):\n \"\"\"Construct auxmap maintenance code for a set update.\"\"\"\n assert 
addremove in ['add', 'remove']\n \n prefix = manager.namegen.next_prefix()\n mask = spec.mask\n \n # Create fresh variables for the tuple components.\n vars = [prefix + str(i) for i in range(1, len(mask) + 1)]\n bvars, uvars, eqs = mask.split_vars(vars)\n \n vars_node = L.tuplify(vars, lval=True)\n map_node = L.ln(spec.map_name)\n bvars_node = L.tuplify(bvars)\n uvars_node = L.tuplify(uvars)\n \n # If there are equalities, include a conditional check for the\n # constraints being satisfied. If there are wildcards, make the\n # image set manipulation operations reference-counted.\n #\n # Avoid these in cases where we don't have equalities/wildcards,\n # to help reduce constant-factor bloat in code size and running\n # time.\n \n if mask.has_equalities:\n template = '''\n VARS = ELEM\n if EQCOND:\n MAP.IMGOP(BVARS, UVARS)\n '''\n eqcond = make_vareq_cond(eqs)\n else:\n template = '''\n VARS = ELEM\n MAP.IMGOP(BVARS, UVARS)\n '''\n eqcond = None\n \n if mask.has_wildcards:\n imgop = {'add': 'rcimgadd',\n 'remove': 'rcimgremove'}[addremove]\n else:\n imgop = {'add': 'imgadd',\n 'remove': 'imgremove'}[addremove]\n \n code = L.pc(template, subst={\n '@IMGOP': imgop,\n 'VARS': vars_node,\n 'ELEM': elem,\n 'MAP': map_node,\n 'BVARS': bvars_node,\n 'UVARS': uvars_node,\n 'EQCOND': eqcond})\n \n return code\n\n\nclass AuxmapMaintainer(L.NodeTransformer):\n \n \"\"\"Auxiliary map maintenance transformer.\"\"\"\n \n def __init__(self, manager, spec):\n super().__init__()\n self.manager = manager\n self.spec = spec\n \n mapname = self.spec.map_name\n self.addfunc_name = '_maint_{}_add'.format(mapname)\n self.removefunc_name = '_maint_{}_remove'.format(mapname)\n \n def visit_Module(self, node):\n mapname = self.spec.map_name\n addcode = make_auxmap_maint_code(self.manager, self.spec,\n L.ln('_e'), 'add')\n removecode = make_auxmap_maint_code(self.manager, self.spec,\n L.ln('_e'), 'remove')\n \n code = L.pc('''\n MAP = Map()\n def ADDFUNC(_e):\n ADDCODE\n def REMOVEFUNC(_e):\n REMOVECODE\n ''', subst={'MAP': L.sn(mapname),\n '<def>ADDFUNC': self.addfunc_name,\n '<c>ADDCODE': addcode,\n '<def>REMOVEFUNC': self.removefunc_name,\n '<c>REMOVECODE': removecode})\n \n node = node._replace(body=code + node.body)\n \n node = self.generic_visit(node)\n \n return node\n \n def visit_SetUpdate(self, node):\n node = self.generic_visit(node)\n \n # No action if\n # - this is not an update to a variable\n # - this is not the variable you are looking for (jedi hand wave)\n if not node.is_varupdate():\n return node\n var, op, elem = node.get_varupdate()\n if var != self.spec.rel:\n return node\n \n precode = postcode = ()\n if op == 'add':\n postcode = L.pc('ADDFUNC(ELEM)',\n subst={'ADDFUNC': self.addfunc_name,\n 'ELEM': elem})\n elif op == 'remove':\n precode = L.pc('REMOVEFUNC(ELEM)',\n subst={'REMOVEFUNC': self.removefunc_name,\n 'ELEM': elem})\n else:\n assert()\n \n code = L.Maintenance(self.spec.map_name, L.ts(node),\n precode, (node,), postcode)\n return code\n\n\nclass MapqueryReplacer(L.NodeTransformer):\n \n \"\"\"Replace relmatch and smlookup queries with uses of the\n corresponding auxmap.\n \"\"\"\n \n def __init__(self, manager, spec):\n super().__init__()\n self.manager = manager\n self.spec = spec\n \n def visit_SetMatch(self, node):\n node = self.generic_visit(node)\n \n if not is_relmatch(node):\n return node\n spec, bounds = get_relmatch(node)\n \n if spec != self.spec:\n return node\n \n lookup = ('rcimglookup' if self.spec.mask.has_wildcards\n else 'imglookup')\n \n code = L.pe('''\n 
MAP.LOOKUP(BOUNDS)\n ''', subst={'MAP': L.ln(self.spec.map_name),\n 'BOUNDS': bounds,\n '@LOOKUP': lookup})\n return code\n \n def visit_SMLookup(self, node):\n node = self.generic_visit(node)\n \n if not is_relsmlookup(node):\n return node\n spec, key = get_relsmlookup(node)\n \n if spec != self.spec:\n return node\n \n if node.default is not None:\n code = L.pe('''\n MAP.singlelookup(KEY, DEFAULT)\n ''', subst={'MAP': L.ln(self.spec.map_name),\n 'KEY': key,\n 'DEFAULT': node.default})\n else:\n code = L.pe('''\n MAP.singlelookup(KEY)\n ''', subst={'MAP': L.ln(self.spec.map_name),\n 'KEY': key})\n \n return code\n\n\ndef inc_relmatch(tree, manager, spec):\n \"\"\"Incrementalize a relmatch query / SMLookup.\"\"\"\n if manager.options.get_opt('verbose'):\n print('Adding auxmap: ' + str(spec))\n \n tree = MapqueryReplacer.run(tree, manager, spec)\n tree = AuxmapMaintainer.run(tree, manager, spec)\n \n return tree\n\n\nclass RelmatchQueryFinder(L.NodeVisitor):\n \n \"\"\"Return the set of auxmap specs that are used by some\n relmatch query or set-map lookup.\n \"\"\"\n \n def process(self, tree):\n self.specs = OrderedSet()\n super().process(tree)\n return self.specs\n \n def visit_SetMatch(self, node):\n self.generic_visit(node)\n \n if is_relmatch(node):\n spec, _key = get_relmatch(node)\n self.specs.add(spec)\n \n def visit_SMLookup(self, node):\n self.generic_visit(node)\n \n if is_relsmlookup(node):\n spec, _key = get_relsmlookup(node)\n self.specs.add(spec)\n\n\nclass DeltaMatchRewriter(L.NodeTransformer):\n \n \"\"\"Replace DeltaMatch nodes with equivalent SetMatch-based code.\"\"\"\n \n def visit_DeltaMatch(self, node):\n mask = Mask(node.mask)\n \n if mask.has_wildcards:\n key = mask.make_projkey(node.elem)\n return L.pe('''\n ({ELEM} if setmatch(TARGET, MASK, KEY).getref(()) == LIMIT\n else {})\n ''', subst={'ELEM': node.elem,\n 'TARGET': node.target,\n 'MASK': mask.make_node(),\n 'KEY': key,\n 'LIMIT': L.Num(node.limit)})\n \n elif mask.has_equalities:\n # FIXME: Not sure how to handle this right now.\n assert()\n \n else:\n return L.pe('{ELEM}', subst={'ELEM': node.elem})\n\n\ndef inc_all_relmatch(tree, manager):\n \"\"\"Incrementalize all setmatch and smlookup queries.\"\"\"\n tree = DeltaMatchRewriter.run(tree)\n specs = RelmatchQueryFinder.run(tree)\n for spec in specs:\n tree = inc_relmatch(tree, manager, spec)\n manager.stats['auxmaps'] += 1\n return tree\n" }, { "alpha_fraction": 0.43492770195007324, "alphanum_fraction": 0.49388208985328674, "avg_line_length": 32.33333206176758, "blob_id": "effd1fcd758b6b8e7212b7c36961e7acdd653a68", "content_id": "8576e012efbaed9a2da05b1c93de5ee8a943dab5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 899, "license_type": "no_license", "max_line_length": 124, "num_lines": 27, "path": "/incoq/tests/programs/comp/inconlyonce_out.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from incoq.runtime import *\n# Comp1 := {x : (x, _) in E}\n_m_E_bw = Map()\ndef _maint__m_E_bw_add(_e):\n (v3_1, v3_2) = _e\n if (v3_1 not in _m_E_bw):\n _m_E_bw[v3_1] = RCSet()\n if (() not in _m_E_bw[v3_1]):\n _m_E_bw[v3_1].add(())\n else:\n _m_E_bw[v3_1].incref(())\n\nComp1 = RCSet()\ndef _maint_Comp1_E_add(_e):\n # Iterate {v1_x : (v1_x, _) in deltamatch(E, 'bw', _e, 1)}\n for v1_x in setmatch(({_e} if ((_m_E_bw[_e[0]] if (_e[0] in _m_E_bw) else RCSet()).getref(()) == 1) else {}), 'uw', ()):\n Comp1.add(v1_x)\n\nfor (v1, v2) in [(1, 2), (1, 3), (2, 3), (3, 4)]:\n # Begin maint _m_E_bw 
after \"E.add((v1, v2))\"\n _maint__m_E_bw_add((v1, v2))\n # End maint _m_E_bw after \"E.add((v1, v2))\"\n # Begin maint Comp1 after \"E.add((v1, v2))\"\n _maint_Comp1_E_add((v1, v2))\n # End maint Comp1 after \"E.add((v1, v2))\"\nprint(sorted(Comp1))\nprint(sorted(Comp1))" }, { "alpha_fraction": 0.45913371443748474, "alphanum_fraction": 0.5160075426101685, "avg_line_length": 32.62025451660156, "blob_id": "c4a3a176f1482d52ac620848d7c1ed2df6c2fb68", "content_id": "4e351a6b92eed2e8f5e232217e6a807d3d174fcc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2655, "license_type": "no_license", "max_line_length": 97, "num_lines": 79, "path": "/incoq/tests/programs/comp/uset/auto_out.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from incoq.runtime import *\n# Comp1 := {(g, x) : g in _U_Comp1, x in E, (x > g)}\n_m_Comp1_out = Map()\ndef _maint__m_Comp1_out_add(_e):\n (v5_1, v5_2) = _e\n if (v5_1 not in _m_Comp1_out):\n _m_Comp1_out[v5_1] = set()\n _m_Comp1_out[v5_1].add(v5_2)\n\ndef _maint__m_Comp1_out_remove(_e):\n (v6_1, v6_2) = _e\n _m_Comp1_out[v6_1].remove(v6_2)\n if (len(_m_Comp1_out[v6_1]) == 0):\n del _m_Comp1_out[v6_1]\n\ndef _maint_Comp1__U_Comp1_add(_e):\n # Iterate {(v1_g, v1_x) : v1_g in deltamatch(_U_Comp1, 'b', _e, 1), v1_x in E, (v1_x > v1_g)}\n v1_g = _e\n for v1_x in E:\n if (v1_x > v1_g):\n # Begin maint _m_Comp1_out after \"Comp1.add((v1_g, v1_x))\"\n _maint__m_Comp1_out_add((v1_g, v1_x))\n # End maint _m_Comp1_out after \"Comp1.add((v1_g, v1_x))\"\n\ndef _maint_Comp1__U_Comp1_remove(_e):\n # Iterate {(v2_g, v2_x) : v2_g in deltamatch(_U_Comp1, 'b', _e, 1), v2_x in E, (v2_x > v2_g)}\n v2_g = _e\n for v2_x in E:\n if (v2_x > v2_g):\n # Begin maint _m_Comp1_out before \"Comp1.remove((v2_g, v2_x))\"\n _maint__m_Comp1_out_remove((v2_g, v2_x))\n # End maint _m_Comp1_out before \"Comp1.remove((v2_g, v2_x))\"\n\ndef _maint_Comp1_E_add(_e):\n # Iterate {(v3_g, v3_x) : v3_g in _U_Comp1, v3_x in deltamatch(E, 'b', _e, 1), (v3_x > v3_g)}\n v3_x = _e\n for v3_g in _U_Comp1:\n if (v3_x > v3_g):\n # Begin maint _m_Comp1_out after \"Comp1.add((v3_g, v3_x))\"\n _maint__m_Comp1_out_add((v3_g, v3_x))\n # End maint _m_Comp1_out after \"Comp1.add((v3_g, v3_x))\"\n\n_U_Comp1 = RCSet()\n_UEXT_Comp1 = Set()\ndef demand_Comp1(g):\n '{(g, x) : g in _U_Comp1, x in E, (x > g)}'\n if (g not in _U_Comp1):\n _U_Comp1.add(g)\n # Begin maint Comp1 after \"_U_Comp1.add(g)\"\n _maint_Comp1__U_Comp1_add(g)\n # End maint Comp1 after \"_U_Comp1.add(g)\"\n else:\n _U_Comp1.incref(g)\n\ndef undemand_Comp1(g):\n '{(g, x) : g in _U_Comp1, x in E, (x > g)}'\n if (_U_Comp1.getref(g) == 1):\n # Begin maint Comp1 before \"_U_Comp1.remove(g)\"\n _maint_Comp1__U_Comp1_remove(g)\n # End maint Comp1 before \"_U_Comp1.remove(g)\"\n _U_Comp1.remove(g)\n else:\n _U_Comp1.decref(g)\n\ndef query_Comp1(g):\n '{(g, x) : g in _U_Comp1, x in E, (x > g)}'\n if (g not in _UEXT_Comp1):\n _UEXT_Comp1.add(g)\n demand_Comp1(g)\n return True\n\nE = Set()\ng = 1\nfor z in [1, 2, 3]:\n E.add(z)\n # Begin maint Comp1 after \"E.add(z)\"\n _maint_Comp1_E_add(z)\n # End maint Comp1 after \"E.add(z)\"\nprint(sorted((query_Comp1(g) and (_m_Comp1_out[g] if (g in _m_Comp1_out) else set()))))" }, { "alpha_fraction": 0.5065540075302124, "alphanum_fraction": 0.5127630233764648, "avg_line_length": 25.842592239379883, "blob_id": "3fcbfcb3c4cf901d8cad369e00847b27cea3b1ac", "content_id": "6838a36f89532782c15d1efddae6129db5b7e77b", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 5798, "license_type": "no_license", "max_line_length": 70, "num_lines": 216, "path": "/experiments/wifi/run_wifi_exp.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Run the wifi experiment.\"\"\"\n\n\nimport os, sys, importlib\nfrom copy import deepcopy\n\nfrom frexp import (ExpWorkflow, Datagen, Runner, Verifier,\n SimpleExtractor, MetricExtractor)\n\nfrom experiments.util import SmallExtractor, LargeExtractor, canonize\n\n\nclass WifiDatagen(Datagen):\n \n \"\"\"Procedure as in Tom's dissertation (p74-75). There's just\n one wifi parameter value, and we alternate adding a new ap, and\n performing a query that amounts to listing all aps up to this\n point.\n \n Parameters:\n N -- number of updates and queries\n \"\"\"\n \n def generate(self, P):\n # Nothing to do, since there's no randomness,\n # or really any generated data to speak of.\n return dict(\n dsparams = P,\n )\n \n progs = [\n 'wifi_orig',\n 'wifi_dem',\n 'wifi_osq',\n ]\n\n\nclass WifiDriver:\n \n def __init__(self, pipe_filename):\n import gc\n import pickle\n \n gc.disable()\n \n with open(pipe_filename, 'rb') as pf:\n dataset, prog, other_tparams = pickle.load(pf)\n os.remove(pipe_filename)\n \n \n self.dataset = dataset\n self.N = dataset['dsparams']['N']\n self.prog = prog\n self.module = None\n self.results = {}\n \n self.setUp()\n \n from frexp.util import StopWatch, user_time\n from time import process_time, perf_counter\n timer_user = StopWatch(user_time)\n timer_cpu = StopWatch(process_time)\n timer_wall = StopWatch(perf_counter)\n \n with timer_user, timer_cpu, timer_wall:\n self.run()\n \n import incoq.runtime\n self.results['size'] = incoq.runtime.get_total_structure_size(\n self.module.__dict__)\n self.results['time_user'] = timer_user.consume()\n self.results['time_cpu'] = timer_cpu.consume()\n self.results['time_wall'] = timer_wall.consume()\n \n self.results['stdmetric'] = self.results['time_cpu']\n \n \n with open(pipe_filename, 'wb') as pf:\n pickle.dump(self.results, pf)\n \n def setUp(self):\n # Import driven program.\n dirname, filename = os.path.split(self.prog)\n if dirname:\n sys.path.append(dirname)\n try:\n self.module = importlib.import_module(\n 'experiments.wifi.' 
+ filename)\n finally:\n if dirname:\n sys.path.pop()\n \n \n m = self.module\n \n self.wifi = m.make_wifi(5)\n m.do_query(self.wifi)\n \n def run(self):\n make_ap = self.module.make_ap\n add_ap = self.module.add_ap\n do_query = self.module.do_query_nodemand\n wifi = self.wifi\n \n for i in range(self.N):\n ap = make_ap(str(i), 10)\n add_ap(wifi, ap)\n do_query(wifi)\n\nclass WifiVerifyDriver:\n \n def __init__(self, pipe_filename):\n import gc\n import pickle\n \n gc.disable()\n \n with open(pipe_filename, 'rb') as pf:\n dataset, prog, other_tparams = pickle.load(pf)\n os.remove(pipe_filename)\n \n \n self.dataset = dataset\n self.N = dataset['dsparams']['N']\n self.prog = prog\n self.module = None\n self.results = {'output': []}\n \n self.setUp()\n \n from frexp.util import StopWatch, user_time\n from time import process_time, perf_counter\n timer_user = StopWatch(user_time)\n timer_cpu = StopWatch(process_time)\n timer_wall = StopWatch(perf_counter)\n \n with timer_user, timer_cpu, timer_wall:\n self.run()\n \n self.results = canonize(self.results)\n \n with open(pipe_filename, 'wb') as pf:\n pickle.dump(self.results, pf)\n \n def setUp(self):\n # Import driven program.\n dirname, filename = os.path.split(self.prog)\n if dirname:\n sys.path.append(dirname)\n try:\n self.module = importlib.import_module(\n 'experiments.wifi.' + filename)\n finally:\n if dirname:\n sys.path.pop()\n \n \n m = self.module\n \n self.wifi = m.make_wifi(5)\n m.do_query(self.wifi)\n \n def run(self):\n make_ap = self.module.make_ap\n add_ap = self.module.add_ap\n do_query = self.module.do_query_nodemand\n wifi = self.wifi\n \n for i in range(self.N):\n ap = make_ap(str(i), 10)\n add_ap(wifi, ap)\n output = do_query(wifi)\n self.results['output'].append(deepcopy(output))\n\n\nclass Wifi(ExpWorkflow):\n \n prefix = 'results/wifi'\n \n require_ac = False ###\n \n ExpDriver = WifiDriver\n ExpVerifyDriver = WifiVerifyDriver\n \n class ExpDatagen(WifiDatagen):\n \n def get_dsparams_list(self):\n return [\n dict(\n dsid = str(x),\n x = x,\n N = x\n )\n for x in range(250, 2501, 250)\n ]\n \n stddev_window = .1\n min_repeats = 20\n max_repeats = 20\n \n class ExpExtractor(MetricExtractor, SmallExtractor):\n \n series = [\n ('wifi_orig', 'original', 'red', '- s poly2'),\n ('wifi_osq', 'OSQ', 'orange', '-- ^ poly1'),\n ('wifi_dem', 'filtered', 'green', '- ^ poly1'),\n ]\n \n ylabel = 'Running time (in seconds)'\n xlabel = 'Number of queries and updates'\n \n metric = 'time_cpu'\n \n xmin = 150\n xmax = 2600\n ymax = .7\n" }, { "alpha_fraction": 0.4127036929130554, "alphanum_fraction": 0.4582640528678894, "avg_line_length": 26.842592239379883, "blob_id": "0c5d01df775b3e21f543919c547a4c8d6bc873f7", "content_id": "6fcc2a0772ebd05531177a7366e1f5ab7ab28296", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3007, "license_type": "no_license", "max_line_length": 79, "num_lines": 108, "path": "/incoq/tests/runtimelib/test_runtimelib.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "###############################################################################\n# test_runtimelib.py #\n# Author: Jon Brandvein #\n###############################################################################\n\n\"\"\"Unit tests for the runtimelib module.\"\"\"\n\n\nimport unittest\nimport pickle\n\nfrom incoq.runtime import *\nfrom incoq.runtime.runtimelib import tupify\n\n\nclass TestRuntimelib(unittest.TestCase):\n \n def test_tupify(self):\n val1 = 1\n val2 = 
tupify([1])\n self.assertEqual(val1, val2)\n \n val3 = (1, 2)\n val4 = tupify([1, 2])\n self.assertEqual(val3, val4)\n \n def test_setmatch(self):\n rel = {(1, 2), (1, 3), (2, 3), 'foo', (1, 4, 5)}\n \n res1 = setmatch(rel, 'bu', 1)\n exp_res1 = {2, 3}\n self.assertEqual(res1, exp_res1)\n \n res1a = setmatch(rel, 'out', 1)\n self.assertEqual(res1a, exp_res1)\n \n res2 = setmatch(rel, 'bu', 2)\n exp_res2 = {3}\n self.assertEqual(res2, exp_res2)\n \n res3 = setmatch(rel, 'uu', ())\n exp_res3 = {(1, 2), (1, 3), (2, 3)}\n self.assertEqual(res3, exp_res3)\n \n def test_minmax(self):\n self.assertEqual(max2(3, 5, 2), 5)\n self.assertEqual(max2(None, 5, None), 5)\n self.assertEqual(max2(None, None, None), None)\n self.assertEqual(max2(), None)\n self.assertEqual(min2(3, 1, 2), 1)\n self.assertEqual(min2(None, 5, None), 5)\n self.assertEqual(min2(None, None, None), None)\n self.assertEqual(min2(), None)\n \n def test_rcset(self):\n s1 = RCSet()\n s1.add(1)\n s1.incref(1)\n self.assertCountEqual(s1, {1})\n s1.decref(1)\n s1.remove(1)\n \n s2 = RCSet()\n with self.assertRaises(AssertionError):\n s2.incref(1) \n \n s3 = RCSet()\n s3.add(1)\n s3.incref(1)\n with self.assertRaises(AssertionError):\n s3.remove(1)\n \n def test_pickle(self):\n o1 = Obj()\n o1.a = 'a'\n b = pickle.dumps(o1)\n o2 = pickle.loads(b)\n self.assertEqual(getattr(o1, 'a', None), 'a')\n self.assertEqual(getattr(o2, 'a', None), 'a')\n \n s1 = Set()\n s1.add(1)\n b = pickle.dumps(s1)\n s2 = pickle.loads(b)\n self.assertIn(1, s1)\n self.assertIn(1, s2)\n \n s1 = RCSet()\n s1.add(1)\n s1.add(2)\n s1.incref(2)\n b = pickle.dumps(s1)\n s2 = pickle.loads(b)\n self.assertIn(1, s1)\n self.assertEqual(s1.getref(2), 2)\n self.assertIn(1, s2)\n self.assertEqual(s2.getref(2), 2)\n \n m1 = Map()\n m1[1] = 2\n b = pickle.dumps(m1)\n m2 = pickle.loads(b)\n self.assertEqual(m1.get(1, None), 2)\n self.assertEqual(m2.get(1, None), 2)\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.4455445408821106, "alphanum_fraction": 0.5111386179924011, "avg_line_length": 30.705883026123047, "blob_id": "9dfcf3bbdbc1bbd55be1171d36a25b4de24a1159", "content_id": "017d848dd9ac5ab57faa3b60735cfff511a51605", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1616, "license_type": "no_license", "max_line_length": 78, "num_lines": 51, "path": "/incoq/tests/programs/comp/parameter_out.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from incoq.runtime import *\n# Comp2 := {(y, (x, y)) : (x, y) in E}\n_m_Comp2_out = Map()\ndef _maint__m_Comp2_out_add(_e):\n (v5_1, v5_2) = _e\n if (v5_1 not in _m_Comp2_out):\n _m_Comp2_out[v5_1] = set()\n _m_Comp2_out[v5_1].add(v5_2)\n\ndef _maint__m_Comp2_out_remove(_e):\n (v6_1, v6_2) = _e\n _m_Comp2_out[v6_1].remove(v6_2)\n if (len(_m_Comp2_out[v6_1]) == 0):\n del _m_Comp2_out[v6_1]\n\n_m_E_out = Map()\ndef _maint__m_E_out_add(_e):\n (v3_1, v3_2) = _e\n if (v3_1 not in _m_E_out):\n _m_E_out[v3_1] = set()\n _m_E_out[v3_1].add(v3_2)\n\ndef _maint_Comp2_E_add(_e):\n # Iterate {(v1_x, v1_y) : (v1_x, v1_y) in deltamatch(E, 'bb', _e, 1)}\n (v1_x, v1_y) = _e\n # Begin maint _m_Comp2_out after \"Comp2.add((v1_y, (v1_x, v1_y)))\"\n _maint__m_Comp2_out_add((v1_y, (v1_x, v1_y)))\n # End maint _m_Comp2_out after \"Comp2.add((v1_y, (v1_x, v1_y)))\"\n\ndef query_Comp1(x):\n 'x -> {y : (x, y) in E}'\n result = set()\n for y in (_m_E_out[x] if (x in _m_E_out) else set()):\n if (y not in result):\n result.add(y)\n return result\n\nE = Set()\nfor (v1, 
v2) in {(1, 2), (2, 3), (2, 4), (4, 5)}:\n E.add((v1, v2))\n # Begin maint _m_E_out after \"E.add((v1, v2))\"\n _maint__m_E_out_add((v1, v2))\n # End maint _m_E_out after \"E.add((v1, v2))\"\n # Begin maint Comp2 after \"E.add((v1, v2))\"\n _maint_Comp2_E_add((v1, v2))\n # End maint Comp2 after \"E.add((v1, v2))\"\nx = 1\ny = 5\nprint(sorted({z for (x2, y) in E for (y2, z) in E if (x == x2) if (y == y2)}))\nprint(sorted(query_Comp1(x)))\nprint(sorted((_m_Comp2_out[y] if (y in _m_Comp2_out) else set())))" }, { "alpha_fraction": 0.4214229881763458, "alphanum_fraction": 0.4229867160320282, "avg_line_length": 26.80434799194336, "blob_id": "8677c664f90226f340f84ac9d0eba1da6415ecb5", "content_id": "e35511b9b75b7ced159284a10176d6ce913ecebc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1279, "license_type": "no_license", "max_line_length": 79, "num_lines": 46, "path": "/incoq/tests/util/collections/test_orderedset.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "###############################################################################\n# test_orderedset.py #\n# Author: Jon Brandvein #\n###############################################################################\n\n\"\"\"Unit tests for the orderedset module.\"\"\"\n\n\nimport unittest\n\nfrom incoq.util.collections.orderedset import *\n\n\nclass TestOrderedSet(unittest.TestCase):\n \n def test_update(self):\n s = OrderedSet('abc')\n s.update('def')\n self.assertEqual(s, 'abcdef')\n \n s.update_union(['ghi', 'jkl'])\n self.assertEqual(s, 'abcdefghijkl')\n \n s2 = OrderedSet.from_union(['abc', 'def', 'ghi', 'jkl'])\n self.assertEqual(s, s2)\n \n def test_reverse_operator(self):\n s = set('abc')\n t = OrderedSet('def')\n \n r = s | t\n self.assertCountEqual(r, 'abcdef')\n \n def test_and(self):\n # Tests the fix for ensuring the order of a & b is taken from\n # a rather than b.\n a = OrderedSet('abcdefghij')\n b = 'jceibhdafg'\n \n r = a & b\n \n self.assertEqual(''.join(r), 'abcdefghij')\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.5691699385643005, "alphanum_fraction": 0.5909090638160706, "avg_line_length": 19.239999771118164, "blob_id": "25025d7477033d59b1427d737d311b8015dc7c73", "content_id": "df09e92ad593a75b9dae172c9c2ade03898f90ca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 506, "license_type": "no_license", "max_line_length": 62, "num_lines": 25, "path": "/incoq/tests/programs/deminc/aug1_in.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "# Demand, with the augmented self-join strategy.\n# Demand invariant maintenance should still come before\n# query maintenance at additions and after it at removals.\n\nfrom incoq.runtime import *\n\nOPTIONS(\n selfjoin_strat = 'aug',\n)\n\nQUERYOPTIONS(\n '{z for (x, y) in E for (y2, z) in E if y == y2}',\n params = ['x'],\n impl = 'dem',\n uset_force = False,\n)\n\nE = Set()\n\nfor a, b in {(1, 2), (2, 3), (2, 4)}:\n E.add((a, b))\n\nx = 1\n\nprint(sorted({z for (x, y) in E for (y2, z) in E if y == y2}))\n" }, { "alpha_fraction": 0.3103530704975128, "alphanum_fraction": 0.33156561851501465, "avg_line_length": 40.28402328491211, "blob_id": "f5985c90f29ca44c59e54b1c410ae3723ff8e485", "content_id": "4309752c76c69e0bfbcc6e5b6b955b59050aace7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 20931, "license_type": "no_license", "max_line_length": 106, 
"num_lines": 507, "path": "/incoq/tests/invinc/comp/test_comptrans.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Unit tests for comptrans.py.\"\"\"\n\n\nimport unittest\n\nimport incoq.compiler.incast as L\nfrom incoq.compiler.central import CentralCase\nfrom incoq.compiler.comp.compspec import CompSpec\nfrom incoq.compiler.comp.comptrans import *\nfrom incoq.compiler.comp.comptrans import (\n IncComp, RelcompMaintainer,\n SubqueryArityFinder, get_subquery_demnames)\n\n\nclass TestComp(CentralCase):\n \n def test_change_tracker(self):\n comp = L.pe('COMP({(x, y, z) for (x, y) in S '\n 'for (y, z) in T}, [], {})')\n spec = CompSpec.from_comp(comp, self.manager.factory)\n inccomp = IncComp(comp, spec, 'Q', False, None, None,\n 'no', 'das', 'auxonly', [], None)\n inccomp.change_tracker = True\n tree = L.p('''\n S.add(e)\n ''')\n tree, comps = RelcompMaintainer.run(tree, self.manager, inccomp)\n \n exp_tree = L.p('''\n Q = RCSet()\n def _maint_Q_S_add(_e):\n for (v1_x, v1_y, v1_z) in COMP({(v1_x, v1_y, v1_z)\n for (v1_x, v1_y) in deltamatch(S, 'bb', _e, 1)\n for (v1_y, v1_z) in T},\n [], {'_deltaelem': '_e',\n '_deltalhs': '(v1_x, v1_y)',\n '_deltaop': 'add',\n '_deltarel': 'S',\n 'impl': 'auxonly'}):\n Q.add((v1_x, v1_y, v1_z))\n \n def _maint_Q_S_remove(_e):\n for (v2_x, v2_y, v2_z) in COMP({(v2_x, v2_y, v2_z)\n for (v2_x, v2_y) in deltamatch(S, 'bb', _e, 1)\n for (v2_y, v2_z) in T},\n [], {'_deltaelem': '_e',\n '_deltalhs': '(v2_x, v2_y)',\n '_deltaop': 'remove',\n '_deltarel': 'S',\n 'impl': 'auxonly'}):\n Q.remove((v2_x, v2_y, v2_z))\n \n def _maint_Q_T_add(_e):\n for (v3_x, v3_y, v3_z) in COMP({(v3_x, v3_y, v3_z)\n for (v3_x, v3_y) in S\n for (v3_y, v3_z) in deltamatch(T, 'bb', _e, 1)},\n [], {'_deltaelem': '_e',\n '_deltalhs': '(v3_y, v3_z)',\n '_deltaop': 'add',\n '_deltarel': 'T',\n 'impl': 'auxonly'}):\n Q.add((v3_x, v3_y, v3_z))\n \n def _maint_Q_T_remove(_e):\n for (v4_x, v4_y, v4_z) in COMP({(v4_x, v4_y, v4_z)\n for (v4_x, v4_y) in S\n for (v4_y, v4_z) in deltamatch(T, 'bb', _e, 1)},\n [], {'_deltaelem': '_e',\n '_deltalhs': '(v4_y, v4_z)',\n '_deltaop': 'remove',\n '_deltarel': 'T',\n 'impl': 'auxonly'}):\n Q.remove((v4_x, v4_y, v4_z))\n \n with MAINT(Q, 'after', 'S.add(e)'):\n S.add(e)\n _maint_Q_S_add(e)\n ''')\n \n self.assertEqual(tree, exp_tree)\n \n def test_arityfinder(self):\n comp1 = L.pe('COMP({x for x in S}, [], {})')\n comp2 = L.pe('COMP({y for y in C1}, [], {})',\n subst={'C1': comp1})\n tree = L.p('''\n print(C2)\n ''', subst={'C2': comp2})\n arity = SubqueryArityFinder.run(tree, comp1)\n self.assertEqual(arity, 1)\n \n comp3 = L.pe('COMP({(x, x) for x in S}, [], {})')\n comp4 = L.pe('COMP({z for (z, z) in C3}, [], {})',\n subst={'C3': comp3})\n tree = L.p('''\n print(C4, C4)\n ''', subst={'C4': comp4})\n arity = SubqueryArityFinder.run(tree, comp3)\n self.assertEqual(arity, 2)\n \n comp5 = L.pe('COMP({z for (z, z) in C1}, [], {})',\n subst={'C1': comp1})\n tree = L.p('''\n print(C5)\n ''', subst={'C5': comp5})\n arity = SubqueryArityFinder.run(tree, comp1)\n self.assertEqual(arity, False)\n \n tree = L.p('''\n print(C2, C1)\n ''', subst={'C2': comp2,\n 'C1': comp1})\n arity = SubqueryArityFinder.run(tree, comp1)\n self.assertEqual(arity, False)\n \n def test_inc_relcomp_basic(self):\n comp = L.pe('COMP({(x, y) for (x, y) in S}, [x], {})')\n tree = L.p('''\n S.add((1, 2))\n print(COMP)\n ''', subst={'COMP': comp})\n tree = inc_relcomp(tree, self.manager, comp, 'Q')\n \n exp_tree = L.p('''\n Q = RCSet()\n def _maint_Q_S_add(_e):\n for 
(v1_x, v1_y) in COMP({(v1_x, v1_y)\n for (v1_x, v1_y) in deltamatch(S, 'bb', _e, 1)},\n [], {'_deltaelem': '_e',\n '_deltalhs': '(v1_x, v1_y)',\n '_deltaop': 'add',\n '_deltarel': 'S',\n 'impl': 'auxonly'}):\n Q.add((v1_x, (v1_x, v1_y)))\n \n def _maint_Q_S_remove(_e):\n for (v2_x, v2_y) in COMP({(v2_x, v2_y)\n for (v2_x, v2_y) in deltamatch(S, 'bb', _e, 1)},\n [], {'_deltaelem': '_e',\n '_deltalhs': '(v2_x, v2_y)',\n '_deltaop': 'remove',\n '_deltarel': 'S',\n 'impl': 'auxonly'}):\n Q.remove((v2_x, (v2_x, v2_y)))\n \n with MAINT(Q, 'after', 'S.add((1, 2))'):\n S.add((1, 2))\n _maint_Q_S_add((1, 2))\n print(setmatch(Q, 'bu', x))\n ''')\n \n self.assertEqual(tree, exp_tree)\n \n def test_inc_relcomp_noparams(self):\n comp = L.pe('COMP({(x, y) for (x, y) in S}, [], {})')\n tree = L.p('''\n S.add((1, 2))\n print(COMP)\n ''', subst={'COMP': comp})\n tree = inc_relcomp(tree, self.manager, comp, 'Q')\n \n exp_tree = L.p('''\n Q = RCSet()\n def _maint_Q_S_add(_e):\n for (v1_x, v1_y) in COMP({(v1_x, v1_y)\n for (v1_x, v1_y) in deltamatch(S, 'bb', _e, 1)},\n [], {'_deltaelem': '_e',\n '_deltalhs': '(v1_x, v1_y)',\n '_deltaop': 'add',\n '_deltarel': 'S',\n 'impl': 'auxonly'}):\n Q.add((v1_x, v1_y))\n \n def _maint_Q_S_remove(_e):\n for (v2_x, v2_y) in COMP({(v2_x, v2_y)\n for (v2_x, v2_y) in deltamatch(S, 'bb', _e, 1)},\n [], {'_deltaelem': '_e',\n '_deltalhs': '(v2_x, v2_y)',\n '_deltaop': 'remove',\n '_deltarel': 'S',\n 'impl': 'auxonly'}):\n Q.remove((v2_x, v2_y))\n \n with MAINT(Q, 'after', 'S.add((1, 2))'):\n S.add((1, 2))\n _maint_Q_S_add((1, 2))\n print(Q)\n ''')\n \n self.assertEqual(tree, exp_tree)\n \n def test_inc_relcomp_maintcomps(self):\n comp = L.pe('COMP({z for (x, y) in R for (y, z) in S}, [x], {})')\n tree = L.p('''\n R.add((1, 2))\n print(COMP)\n ''', subst={'COMP': comp})\n inccomp = make_inccomp(tree, self.manager, comp, 'Q')\n tree, maintcomps = inc_relcomp_helper(tree, self.manager, inccomp)\n \n exp_tree = L.p('''\n Q = RCSet()\n def _maint_Q_R_add(_e):\n for (v1_x, v1_y, v1_z) in COMP({(v1_x, v1_y, v1_z)\n for (v1_x, v1_y) in deltamatch(R, 'bb', _e, 1)\n for (v1_y, v1_z) in S},\n [], {'_deltaelem': '_e',\n '_deltalhs': '(v1_x, v1_y)',\n '_deltaop': 'add',\n '_deltarel': 'R',\n 'impl': 'auxonly'}):\n if ((v1_x, v1_z) not in Q):\n Q.add((v1_x, v1_z))\n else:\n Q.incref((v1_x, v1_z))\n \n def _maint_Q_R_remove(_e):\n for (v2_x, v2_y, v2_z) in COMP({(v2_x, v2_y, v2_z)\n for (v2_x, v2_y) in deltamatch(R, 'bb', _e, 1)\n for (v2_y, v2_z) in S},\n [], {'_deltaelem': '_e',\n '_deltalhs': '(v2_x, v2_y)',\n '_deltaop': 'remove',\n '_deltarel': 'R',\n 'impl': 'auxonly'}):\n if (Q.getref((v2_x, v2_z)) == 1):\n Q.remove((v2_x, v2_z))\n else:\n Q.decref((v2_x, v2_z))\n \n def _maint_Q_S_add(_e):\n for (v3_x, v3_y, v3_z) in COMP({(v3_x, v3_y, v3_z)\n for (v3_x, v3_y) in R\n for (v3_y, v3_z) in deltamatch(S, 'bb', _e, 1)},\n [], {'_deltaelem': '_e',\n '_deltalhs': '(v3_y, v3_z)',\n '_deltaop': 'add',\n '_deltarel': 'S',\n 'impl': 'auxonly'}):\n if ((v3_x, v3_z) not in Q):\n Q.add((v3_x, v3_z))\n else:\n Q.incref((v3_x, v3_z))\n \n def _maint_Q_S_remove(_e):\n for (v4_x, v4_y, v4_z) in COMP({(v4_x, v4_y, v4_z)\n for (v4_x, v4_y) in R\n for (v4_y, v4_z) in deltamatch(S, 'bb', _e, 1)},\n [], {'_deltaelem': '_e',\n '_deltalhs': '(v4_y, v4_z)',\n '_deltaop': 'remove',\n '_deltarel': 'S',\n 'impl': 'auxonly'}):\n if (Q.getref((v4_x, v4_z)) == 1):\n Q.remove((v4_x, v4_z))\n else:\n Q.decref((v4_x, v4_z))\n \n with MAINT(Q, 'after', 'R.add((1, 2))'):\n R.add((1, 2))\n _maint_Q_R_add((1, 2))\n 
print(setmatch(Q, 'bu', x))\n ''')\n exp_maintcomps = [\n L.pe('''COMP({(v1_x, v1_y, v1_z)\n for (v1_x, v1_y) in deltamatch(R, 'bb', _e, 1)\n for (v1_y, v1_z) in S},\n [], {'_deltaelem': '_e',\n '_deltalhs': '(v1_x, v1_y)',\n '_deltaop': 'add',\n '_deltarel': 'R',\n 'impl': 'auxonly'})'''),\n L.pe('''COMP({(v2_x, v2_y, v2_z)\n for (v2_x, v2_y) in deltamatch(R, 'bb', _e, 1)\n for (v2_y, v2_z) in S},\n [], {'_deltaelem': '_e',\n '_deltalhs': '(v2_x, v2_y)',\n '_deltaop': 'remove',\n '_deltarel': 'R',\n 'impl': 'auxonly'})'''),\n L.pe('''COMP({(v3_x, v3_y, v3_z)\n for (v3_x, v3_y) in R\n for (v3_y, v3_z) in deltamatch(S, 'bb', _e, 1)},\n [], {'_deltaelem': '_e',\n '_deltalhs': '(v3_y, v3_z)',\n '_deltaop': 'add',\n '_deltarel': 'S',\n 'impl': 'auxonly'})'''),\n L.pe('''COMP({(v4_x, v4_y, v4_z)\n for (v4_x, v4_y) in R\n for (v4_y, v4_z) in deltamatch(S, 'bb', _e, 1)},\n [], {'_deltaelem': '_e',\n '_deltalhs': '(v4_y, v4_z)',\n '_deltaop': 'remove',\n '_deltarel': 'S',\n 'impl': 'auxonly'})'''),\n ]\n \n self.assertEqual(tree, exp_tree)\n self.assertEqual(maintcomps, exp_maintcomps)\n \n self.assertEqual(exp_maintcomps[0].options['impl'], 'auxonly')\n \n def test_inc_relcomp_uset(self):\n comp = L.pe('COMP({z for (x, y) in R for (y, z) in S}, [x], '\n '{\"uset_mode\": \"all\"})')\n tree = L.p('''\n T.add(e)\n print(COMP)\n ''', subst={'COMP': comp})\n tree = inc_relcomp(tree, self.manager, comp, 'Q')\n \n exp_tree = L.p('''\n Q = RCSet()\n def _maint_Q__U_Q_add(_e):\n for (v1_x, v1_y, v1_z) in COMP({(v1_x, v1_y, v1_z)\n for v1_x in deltamatch(_U_Q, 'b', _e, 1)\n for (v1_x, v1_y) in R for (v1_y, v1_z) in S},\n [], {'_deltaelem': '_e',\n '_deltalhs': 'v1_x',\n '_deltaop': 'add',\n '_deltarel': '_U_Q',\n 'impl': 'auxonly'}):\n if ((v1_x, v1_z) not in Q):\n Q.add((v1_x, v1_z))\n else:\n Q.incref((v1_x, v1_z))\n \n def _maint_Q__U_Q_remove(_e):\n for (v2_x, v2_y, v2_z) in COMP({(v2_x, v2_y, v2_z)\n for v2_x in deltamatch(_U_Q, 'b', _e, 1)\n for (v2_x, v2_y) in R\n for (v2_y, v2_z) in S},\n [], {'_deltaelem': '_e',\n '_deltalhs': 'v2_x',\n '_deltaop': 'remove',\n '_deltarel': '_U_Q',\n 'impl': 'auxonly'}):\n if (Q.getref((v2_x, v2_z)) == 1):\n Q.remove((v2_x, v2_z))\n else:\n Q.decref((v2_x, v2_z))\n \n def _maint_Q_R_add(_e):\n for (v3_x, v3_y, v3_z) in COMP({(v3_x, v3_y, v3_z)\n for v3_x in _U_Q\n for (v3_x, v3_y) in deltamatch(R, 'bb', _e, 1)\n for (v3_y, v3_z) in S},\n [], {'_deltaelem': '_e',\n '_deltalhs': '(v3_x, v3_y)',\n '_deltaop': 'add',\n '_deltarel': 'R',\n 'impl': 'auxonly'}):\n if ((v3_x, v3_z) not in Q):\n Q.add((v3_x, v3_z))\n else:\n Q.incref((v3_x, v3_z))\n \n def _maint_Q_R_remove(_e):\n for (v4_x, v4_y, v4_z) in COMP({(v4_x, v4_y, v4_z)\n for v4_x in _U_Q\n for (v4_x, v4_y) in deltamatch(R, 'bb', _e, 1)\n for (v4_y, v4_z) in S},\n [], {'_deltaelem': '_e',\n '_deltalhs': '(v4_x, v4_y)',\n '_deltaop': 'remove',\n '_deltarel': 'R',\n 'impl': 'auxonly'}):\n if (Q.getref((v4_x, v4_z)) == 1):\n Q.remove((v4_x, v4_z))\n else:\n Q.decref((v4_x, v4_z))\n \n def _maint_Q_S_add(_e):\n for (v5_x, v5_y, v5_z) in COMP({(v5_x, v5_y, v5_z)\n for v5_x in _U_Q\n for (v5_x, v5_y) in R\n for (v5_y, v5_z) in deltamatch(S, 'bb', _e, 1)},\n [], {'_deltaelem': '_e',\n '_deltalhs': '(v5_y, v5_z)',\n '_deltaop': 'add',\n '_deltarel': 'S',\n 'impl': 'auxonly'}):\n if ((v5_x, v5_z) not in Q):\n Q.add((v5_x, v5_z))\n else:\n Q.incref((v5_x, v5_z))\n \n def _maint_Q_S_remove(_e):\n for (v6_x, v6_y, v6_z) in COMP({(v6_x, v6_y, v6_z)\n for v6_x in _U_Q\n for (v6_x, v6_y) in R\n for (v6_y, v6_z) in deltamatch(S, 
'bb', _e, 1)},\n [], {'_deltaelem': '_e',\n '_deltalhs': '(v6_y, v6_z)',\n '_deltaop': 'remove',\n '_deltarel': 'S',\n 'impl': 'auxonly'}):\n if (Q.getref((v6_x, v6_z)) == 1):\n Q.remove((v6_x, v6_z))\n else:\n Q.decref((v6_x, v6_z))\n \n _U_Q = RCSet()\n _UEXT_Q = Set()\n def demand_Q(x):\n '{(x, z) : x in _U_Q, (x, y) in R, (y, z) in S}'\n if (x not in _U_Q):\n with MAINT(Q, 'after', '_U_Q.add(x)'):\n _U_Q.add(x)\n _maint_Q__U_Q_add(x)\n else:\n _U_Q.incref(x)\n \n def undemand_Q(x):\n '{(x, z) : x in _U_Q, (x, y) in R, (y, z) in S}'\n if (_U_Q.getref(x) == 1):\n with MAINT(Q, 'before', '_U_Q.remove(x)'):\n _maint_Q__U_Q_remove(x)\n _U_Q.remove(x)\n else:\n _U_Q.decref(x)\n \n def query_Q(x):\n '{(x, z) : x in _U_Q, (x, y) in R, (y, z) in S}'\n if (x not in _UEXT_Q):\n _UEXT_Q.add(x)\n demand_Q(x)\n return True\n \n T.add(e)\n print(DEMQUERY(Q, [x], setmatch(Q, 'bu', x)))\n ''')\n \n self.assertEqual(tree, exp_tree)\n \n def test_auxonly_relcomp(self):\n comp1 = L.pe(\n 'COMP({(y, z) for (x, y) in R for (y, z) in S}, [x], {})')\n comp2 = L.pe(\n 'COMP({z for (x, y) in R for (y, z) in S}, [x], {})')\n tree = L.p('''\n R.add((1, 2))\n for (y, z) in COMP1:\n pass\n for z in COMP2:\n pass\n ''', subst={'COMP1': comp1,\n 'COMP2': comp2})\n tree = impl_auxonly_relcomp(tree, self.manager, comp1, 'Q1')\n tree = impl_auxonly_relcomp(tree, self.manager, comp2, 'Q2')\n \n exp_tree = L.p('''\n def query_Q2(x):\n 'x -> {z : (x, y) in R, (y, z) in S}'\n result = set()\n for y in setmatch(R, 'bu', x):\n for z in setmatch(S, 'bu', y):\n if (z not in result):\n result.add(z)\n return result\n \n R.add((1, 2))\n Comment('Iterate x -> {(y, z) : (x, y) in R, (y, z) in S}')\n for y in setmatch(R, 'bu', x):\n for z in setmatch(S, 'bu', y):\n pass\n for z in query_Q2(x):\n pass\n ''')\n \n self.assertEqual(tree, exp_tree)\n \n def test_patternize_depatternize(self):\n orig_comp = L.pe('COMP({z for (x_2, y) in R if x == x_2 for (y_2, z) in S if y == y_2}, [x], {})')\n exp_comp = L.pe('COMP({z for (x, y) in R for (y, z) in S}, [x], {})')\n \n comp = patternize_comp(orig_comp, self.manager.factory)\n self.assertEqual(comp, exp_comp)\n comp = depatternize_comp(comp, self.manager.factory)\n self.assertEqual(comp, orig_comp)\n \n def test_get_subquery_demnames(self):\n comp = L.pe('''\n COMP({(x, y, v) for (x, y) in U for (y, z) in S\n for w in DEMQUERY(query1, [x, z], Q1)\n for v in DEMQUERY(query2, [y, w], Q2)}, [], {})\n ''')\n spec = CompSpec.from_comp(comp, self.manager.factory)\n res = get_subquery_demnames(spec)\n \n exp_spec1 = CompSpec.from_comp(L.pe('''\n COMP({(x, z) for (x, y) in U for (y, z) in S}, [], {})\n '''), self.manager.factory)\n exp_spec2 = CompSpec.from_comp(L.pe('''\n COMP({(y, w) for (x, y) in U for (y, z) in S for w in Q1}, [], {})\n '''), self.manager.factory)\n exp_res = [\n ('query1', exp_spec1),\n ('query2', exp_spec2),\n ]\n \n self.assertEqual(res, exp_res)\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.5319548845291138, "alphanum_fraction": 0.5357142686843872, "avg_line_length": 27.5, "blob_id": "792584d2db94c207c1ced281897139802f37c6dd", "content_id": "9ef3335ead2f7014c34cbddca6150450c98f07e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1596, "license_type": "no_license", "max_line_length": 65, "num_lines": 56, "path": "/incoq/tests/invinc/incast/test_structconv.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Unit tests for structconv.py.\"\"\"\n\n\nimport 
unittest\nimport ast\nfrom iast import PatVar\n\nfrom incoq.compiler.incast.nodes import *\nfrom incoq.compiler.incast.structconv import *\n\n\nclass StructconvCase(unittest.TestCase):\n \n def test_convert(self):\n struct_tree = Module((Return(Num(5)),))\n \n # Import.\n tree = ast.parse('return 5')\n tree = import_structast(tree)\n self.assertEqual(tree, struct_tree)\n \n # Parse.\n tree = parse_structast('return 5')\n self.assertEqual(tree, struct_tree)\n \n # Unparse.\n source = unparse_structast(struct_tree)\n exp_source = 'return 5'\n self.assertEqual(source, exp_source)\n \n # Export.\n tree = export_structast(struct_tree)\n self.assertTrue(isinstance(tree, ast.Module) and\n len(tree.body) == 1 and\n isinstance(tree.body[0], ast.Return))\n \n def test_parse(self):\n tree = parse_structast('_X + B', mode='expr',\n subst={'B': 'b'},\n patterns=True)\n exp_tree = BinOp(PatVar('_X'), Add(), Name('b', Load()))\n self.assertEqual(tree, exp_tree)\n \n def test_unparse(self):\n tree = parse_structast('pass')\n tree = tree._replace(body=(Comment('test'),) + tree.body)\n source = unparse_structast(tree)\n exp_source = trim('''\n # test\n pass\n ''')\n self.assertEqual(source, exp_source)\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.6063588261604309, "alphanum_fraction": 0.6078728437423706, "avg_line_length": 29.367816925048828, "blob_id": "f1fa660db078c728544d9b006ab75765913e86c6", "content_id": "2d92a251c42bb2c66f148f8c713d227867afec72", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2642, "license_type": "no_license", "max_line_length": 70, "num_lines": 87, "path": "/incoq/util/planner.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"A naive engine for non-deterministically stepping from an\ninitial computation state to zero or more final states.\n\"\"\"\n\n\n__all__ = [\n 'State',\n 'Planner',\n]\n\n\nfrom abc import ABCMeta, abstractmethod\n\nfrom simplestruct.type import checktype_seq\n\n\nclass State(metaclass=ABCMeta):\n \n \"\"\"Computation state.\"\"\"\n \n @abstractmethod\n def accepts(self):\n \"\"\"If this is a finished state, return True if this state\n has an answer, i.e. has not failed.\n \"\"\"\n \n @abstractmethod\n def get_answer(self):\n \"\"\"Return the solution represented by this state.\n Invalid (potentially raising ValueError) if this state\n is not a finished, accepting state.\n \"\"\"\n \n @abstractmethod\n def successors(self):\n \"\"\"Return a sequence of non-deterministic successor states\n that may immediately follow this one. For finished states,\n this must be an empty sequence. 
For other states, the empty\n sequence indicates failure along this computation path.\n \"\"\"\n\n\nclass Planner:\n \n \"\"\"Solver that takes an initial state and finds one or more final\n states.\n \"\"\"\n \n def process(self, states, first_only=False):\n \"\"\"Given a list of states, return a list of finished accepting\n states that are either immediately contained in this list, or\n that can be recursively derived from the other states.\n \n If first_only is True, only the first successor state for\n each state in the list will be considered.\n \"\"\"\n checktype_seq(states, State)\n \n results = []\n for state in states:\n succ = state.successors()\n if first_only:\n succ = succ[:1]\n \n if len(succ) == 0 and state.accepts():\n results.append(state)\n else:\n results.extend(self.process(succ))\n \n return results\n \n def get_all_answers(self, init_state):\n \"\"\"Return all solutions corresponding to final states\n reachable from init_state. No duplicate elimination is\n performed.\n \"\"\"\n return [s.get_answer() for s in self.process([init_state])]\n \n def get_answer(self, init_state):\n \"\"\"Return an answer from a deterministic run. If this\n run reaches failure, ValueError is raised. Note that\n this may occur even if a different run contains a solution.\n \"\"\"\n ans = self.process([init_state], first_only=True)\n if len(ans) == 0:\n raise ValueError('No solution found')\n return ans[0].get_answer()\n" }, { "alpha_fraction": 0.4990476071834564, "alphanum_fraction": 0.5504761934280396, "avg_line_length": 18.44444465637207, "blob_id": "9389f59c06bacc60817a29dabbf84d3f3e6a433c", "content_id": "c1a10592f32761655f5d6d3c23ccf8336a42c7fa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 525, "license_type": "no_license", "max_line_length": 66, "num_lines": 27, "path": "/incoq/tests/programs/aggr/lru_in.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "# Aggregate with a LRU cache.\n\nfrom incoq.runtime import *\n\nOPTIONS(\n default_impl = 'inc',\n)\n\nQUERYOPTIONS(\n '{y for (x2, y) in E if x == x2}',\n uset_mode = 'all',\n)\n\nQUERYOPTIONS(\n 'sum({y for (x2, y) in E if x == x2})',\n uset_lru = 2,\n)\n\nE = Set()\n\nfor e in [(1, 2), (1, 3), (2, 4), (2, 10), (3, 1)]:\n E.add(e)\n\n# Tracing the execution, 1 and 2 should be added, then 1 should be\n# pinged, then 2 should be removed to make way for 3.\nfor x in [1, 2, 1, 3]:\n print(sum({y for (x2, y) in E if x == x2}))\n" }, { "alpha_fraction": 0.6268907785415649, "alphanum_fraction": 0.6268907785415649, "avg_line_length": 21.884614944458008, "blob_id": "ae700f8cd319e8571c58cd682bcba0f96e9f6e6d", "content_id": "21462a5523344fb47e010d122a57b129a5d2bfec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 595, "license_type": "no_license", "max_line_length": 66, "num_lines": 26, "path": "/incoq/tests/invinc/incast/test_nodes.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Unit tests for nodes.py.\"\"\"\n\n\nimport unittest\nimport ast\nimport iast\n\nfrom incoq.compiler.incast.nodes import *\nfrom incoq.compiler.incast.nodes import incast_nodes_untyped\n\n\nclass Nodes(unittest.TestCase):\n \n def test_typed_nodes(self):\n Name('a', Load())\n BinOp(NameConstant(True), Add(), NameConstant(True, None))\n \n def test_type_adder(self):\n node = incast_nodes_untyped['Name']('x', Load())\n node = TypeAdder.run(node)\n exp_node = Name('x', Load())\n self.assertEqual(node, 
exp_node)\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.5428571701049805, "alphanum_fraction": 0.5714285969734192, "avg_line_length": 16.5, "blob_id": "a6a2b9aab551bcbc24e381c800901b055b5a9f3e", "content_id": "f86570e408131eeade7e4152b886e10d5be4488e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 280, "license_type": "no_license", "max_line_length": 59, "num_lines": 16, "path": "/incoq/tests/programs/comp/tup/flatten_in.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "# Check for flattening of relations that use nested tuples.\n\nfrom incoq.runtime import *\n\nOPTIONS(\n flatten_rels = ['R'],\n default_impl = 'inc',\n)\n\nR = Set()\nS = Set()\n\nR.update([(1, (2, 3)), (4, (5, 6))])\nS.update([1, 4])\n\nprint(sorted({z for (x, (y, z)) in R if x in S}))\n" }, { "alpha_fraction": 0.38711410760879517, "alphanum_fraction": 0.3924832344055176, "avg_line_length": 27.653846740722656, "blob_id": "86d3b8ccedb0b4621725188905f9368f1aab76a8", "content_id": "4db48476eeab942d50135a698ee0b354da06fd2d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3725, "license_type": "no_license", "max_line_length": 73, "num_lines": 130, "path": "/incoq/tests/invinc/obj/test_domaintrans.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Unit tests for domaintrans.py.\"\"\"\n\n\nimport unittest\n\nimport incoq.compiler.incast as L\nfrom incoq.compiler.central import CentralCase\nfrom incoq.compiler.obj.domaintrans import *\nfrom incoq.compiler.obj.domaintrans import (\n UpdateToPairTransformer, UpdateToObjTransformer,\n flatten_all_comps, unflatten_all_comps,\n AggregatePreprocessor)\n\n\nclass TestDomaintrans(CentralCase):\n \n def test_update_topair(self):\n tree = L.p('''\n x.add(y)\n T.add(x)\n o.foo = 4\n print(o.foo)\n del o.foo\n o.bar = 5\n m.assignkey(k, v)\n m.delkey(k)\n ''')\n tree = UpdateToPairTransformer.run(tree, True, {'foo'}, True,\n ['T'])\n \n exp_use_mset = True\n exp_fields = {'foo', 'bar'}\n exp_use_maprel = True\n exp_tree = L.p('''\n _M = MSet()\n _F_foo = FSet()\n _MAP = MAPSet()\n _M.add((x, y))\n T.add(x)\n _F_foo.add((o, 4))\n print(o.foo)\n _F_foo.remove((o, o.foo))\n o.bar = 5\n _MAP.add((m, k, v))\n _MAP.remove((m, k, m[k]))\n ''')\n \n self.assertEqual(tree, exp_tree)\n \n def test_update_toobj(self):\n tree = L.p('''\n _M = MSet()\n _F_foo = FSet()\n _M.add((x, y))\n _F_foo.remove((o, o.foo))\n _M.add(w)\n _F_foo.add(w)\n _MAP.add((m, k, v))\n _MAP.remove(z)\n ''')\n tree = UpdateToObjTransformer.run(tree, self.manager.namegen)\n exp_tree = L.p('''\n x.add(y)\n del o.foo\n v1_cont, v1_item = w\n v1_cont.add(v1_item)\n v2_cont, v2_item = w\n v2_cont.foo = v2_item\n m[k] = v\n v3_map, v3_key, v3_value = z\n del v3_map[v3_key]\n ''')\n self.assertEqual(tree, exp_tree)\n \n def test_aggr(self):\n tree = L.p('''\n print(sum(x, {}))\n print(sum(o.f, {}))\n print(sum(o[a.b].f, {}))\n ''')\n tree = AggregatePreprocessor.run(tree)\n \n exp_tree = L.p('''\n print(sum(COMP({_e for _e in x}, [x], {}), {}))\n print(sum(COMP({_e for _e in o.f}, [o], {}), {}))\n print(sum(COMP({_e for _e in o[a.b].f}, [o, a], {}), {}))\n ''')\n \n self.assertEqual(tree, exp_tree)\n \n def test_pairdomain(self):\n tree = L.p('''\n S.add(o)\n T.add(o)\n o.a = 5\n print(COMP({x.a.b[c] for x in S if x in T}, [S, T], {}))\n ''')\n tree = to_pairdomain(tree, self.manager, ['T'])\n \n exp_tree = L.p('''\n _M = MSet()\n _F_a = 
FSet()\n _F_b = FSet()\n _MAP = MAPSet()\n _M.add((S, o))\n T.add(o)\n _F_a.add((o, 5))\n print(COMP({m_x_a_b_k_c for (S, x) in _M if x in T\n for (x, x_a) in _F_a\n for (x_a, x_a_b) in _F_b\n for (x_a_b, c, m_x_a_b_k_c) in _MAP},\n [S, T], {}))\n ''')\n \n self.assertEqual(tree, exp_tree)\n \n tree = to_objdomain(tree, self.manager)\n \n exp_tree = L.p('''\n S.add(o)\n T.add(o)\n o.a = 5\n print(COMP({x.a.b[c] for x in S if x in T}, [S, T], {}))\n ''')\n \n self.assertEqual(tree, exp_tree)\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.4819819927215576, "alphanum_fraction": 0.4977477490901947, "avg_line_length": 13.800000190734863, "blob_id": "8259e072db90bcfd7699f49754a0f08bb192275b", "content_id": "bc3ee252da7536020a63ddaf909a4511cbae1384", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 444, "license_type": "no_license", "max_line_length": 55, "num_lines": 30, "path": "/incoq/tests/programs/comp/nested/outline_in.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "# Outline mode.\n\nfrom incoq.runtime import *\n\nOPTIONS(\n mode = 'outline',\n obj_domain = True,\n)\n\nQUERYOPTIONS(\n '{x for x in s if x.a > 1}',\n uset_mode = 'none',\n impl = 'inc',\n)\n\nQUERYOPTIONS(\n '{y.b for y in {x for x in s if x.a > 1}}',\n uset_mode = 'none',\n impl = 'inc'\n)\n\ns = Set()\n\nfor i in [1, 2, 3]:\n o = Obj()\n o.a = i\n o.b = i * 2\n s.add(o)\n\nprint(sorted({y.b for y in {x for x in s if x.a > 1}}))\n" }, { "alpha_fraction": 0.5635796785354614, "alphanum_fraction": 0.5652590990066528, "avg_line_length": 34.3220329284668, "blob_id": "15924637648534f2c7ff82e1d4f540a9c66b7034", "content_id": "7c04602d5a3801f5e24eb2488d74b98425062488", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8336, "license_type": "no_license", "max_line_length": 79, "num_lines": 236, "path": "/incoq/compiler/central/manager.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "###############################################################################\n# manager.py #\n# Author: Jon Brandvein #\n###############################################################################\n\n\"\"\"Central manager construction.\"\"\"\n\n\n__all__ = [\n 'get_clause_factory',\n \n 'make_manager',\n \n 'ManagerCase',\n 'CentralCase',\n]\n\n\nfrom unittest import TestCase\n\nimport incoq.compiler.incast as L\nfrom incoq.compiler.comp import Join, CompSpec, ClauseFactory\nfrom incoq.compiler.obj import ObjClauseFactory_Mixin\nfrom incoq.compiler.demand import DemClauseFactory_Mixin\nfrom incoq.compiler.tup import TupClauseFactory_Mixin\n\n\nfrom .options import OptionsManager\n\n\nclass CentralClauseFactory(TupClauseFactory_Mixin,\n DemClauseFactory_Mixin,\n ObjClauseFactory_Mixin,\n ClauseFactory):\n pass\n\n# TODO: This is an ugly hack so that clause factories can be pickled.\n# (This in turn is needed to be able to pickle comp specs.) In the\n# future I should change it so that clause factories are used as\n# instances rather than as classes. 
Then the objdomain and typecheck\n# flags would be instance attributes and would be naturally pickleable.\n\nclass ClauseFactory_ObjType(CentralClauseFactory):\n objdomain = True\n typecheck = True\nclass ClauseFactory_ObjNoType(CentralClauseFactory):\n objdomain = True\n typecheck = False\nclass ClauseFactory_NoObjType(CentralClauseFactory):\n objdomain = False\n typecheck = True\nclass ClauseFactory_NoObjNoType(CentralClauseFactory):\n objdomain = False\n typecheck = False\n\ndef get_clause_factory(*, use_objdomain, use_typecheck):\n \"\"\"Construct a clause factory subclass with the desired flags.\"\"\"\n return {(True, True): ClauseFactory_ObjType,\n (True, False): ClauseFactory_ObjNoType,\n (False, True): ClauseFactory_NoObjType,\n (False, False): ClauseFactory_NoObjNoType\n }[use_objdomain, use_typecheck]\n\n\nclass Manager:\n \n \"\"\"Transformation manager. Keeps centralized information about\n transforming a single program.\n \n This information includes:\n \n - the parser (with registered macros)\n - the specified options\n - fresh name/prefix generator\n - pair sets used for representing object-domain relationships\n \"\"\"\n \n def __init__(self, namegen=None):\n if namegen is None:\n namegen = L.NameGenerator()\n self.namegen = namegen\n \"\"\"Unique variable identifier generator.\"\"\"\n \n self.parser = L\n \n self.compnamegen = L.NameGenerator(fmt='Comp{}', counter=1)\n \"\"\"Generator specifically for naming comprehension queries.\"\"\"\n \n from incoq.compiler.aggr import AGGR_PREFIX\n self.aggrnamegen = L.NameGenerator(fmt=AGGR_PREFIX + '{}', counter=1)\n \"\"\"Generator specifically for naming aggregate queries.\"\"\"\n \n self.options = OptionsManager()\n \"\"\"Options for transformation.\"\"\"\n \n self.header_comments = []\n \"\"\"List of comments to emit at top of code.\"\"\"\n \n self.vartypes = {}\n \"\"\"Variable types. Keys are variable names, values are type terms.\"\"\"\n \n self.stats = {\n 'trans time': 0, # transformation time (process time)\n 'lines': 0, # lines of code, excl. whitespace/comments\n \n 'incr queries': 0, # number of queries incrementalized\n 'incr comps': 0, # number of comps incrementalized\n 'incr aggrs': 0, # number of aggregates incrementalized\n \n 'orig queries': 0, # number of incr. queries that were\n # from the input program\n 'orig updates': 0, # number of updates to incr. 
queries\n # from the input program\n \n 'dem structs': 0, # number of tags/filters/inner-usets\n # created for filtered comps\n \n 'comps expanded': 0, # number of comps expanded as batch + maps \n \n 'auxmaps': 0, # number of auxmaps created\n \n 'queries processed': 0, # number of queries considered for\n # transformation (not necessarily actually\n # transformed)\n \n 'queries skipped': 0, # number of queries skipped for not\n # satisfying syntactic requirements\n # for transformation\n \n # The following are used for exporting transformation data\n # for later analysis.\n 'costs': {}, # dictionary mapping from function name to\n # analyzed cost\n 'domain_subst': {}, # domain constraint solutions\n 'invariants': {}, # mapping from invariant name to spec obj\n }\n \"\"\"Statistics about the transformation.\"\"\"\n \n self.original_queryinvs = set()\n \"\"\"Set of names of invariants corresponding to queries\n from the original program.\n \"\"\"\n \n # Hackish.\n self.parser.manager = self\n self.parser.options = self.options\n \n # Still hackish.\n self.use_mset = False\n self.fields = []\n self.use_mapset = False\n \n self.invariants = {}\n \"\"\"Map from name to IncComp/IncAggr object.\"\"\"\n \n def add_macros(self, seq):\n \"\"\"Register a sequence of ContextMacros, and update their\n manager to be this one.\n \"\"\"\n for item in seq:\n self.parser.macros.add(item)\n item.manager = self\n \n def add_note(self, s):\n \"\"\"Append a line to the header comments.\"\"\"\n self.header_comments += [L.Comment(s)]\n \n def add_invariant(self, name, inv):\n from incoq.compiler.comp import IncComp\n from incoq.compiler.aggr import IncAggr\n assert isinstance(inv, (IncComp, IncAggr))\n self.invariants[name] = inv\n self.add_note('{name} := {inv.spec}'.format(**locals()))\n \n def analyze_types(self, tree):\n \"\"\"Do type analysis on a program fragment using saved type info.\n Return the annotated fragment and update the saved info.\n \"\"\"\n # Old type analysis (constraints).\n# tree, self.vartypes = L.analyze_types_constraints(\n# tree, self.vartypes)\n # New type analysis (abstract interpretation).\n tree, self.vartypes = L.analyze_types(tree, self.vartypes)\n return tree\n\n\ndef make_manager():\n \"\"\"Construct and return a Manager with all macros.\"\"\"\n man = Manager()\n man.factory = CentralClauseFactory\n return man\n\n\nclass ManagerCase:\n \n \"\"\"Mixin for unit tests that need a manager for parsing purposes.\"\"\"\n \n def setUp(self):\n super().setUp()\n \n self.manager = man = make_manager()\n self.options = man.options\n self.parser = man.parser\n self.p = man.parser.p\n self.pc = man.parser.pc\n self.ps = man.parser.ps\n self.pe = man.parser.pe\n self.trim = man.parser.trim\n self.ts = man.parser.ts\n \n def tearDown(self):\n del self.manager\n del self.options\n del self.parser, self.p, self.pc, self.ps, self.pe\n \n super().tearDown()\n \n def make_join(self, source):\n \"\"\"Construct a Join from a comprehension's source code\n (ignoring the result expression).\n \"\"\"\n node = L.pe(source)\n join = Join.from_comp(node, self.manager.factory)\n return join\n \n def make_relcompspec(self, source, params):\n \"\"\"Construct a CompSpec from a comprehension's source code.\"\"\"\n node = L.pe(source)\n node = node._replace(params=params)\n spec = CompSpec.from_comp(node, self.manager.factory)\n return spec\n\n\nclass CentralCase(ManagerCase, TestCase):\n \n \"\"\"Combination-class for unit test cases with added functionality.\"\"\"\n" }, { "alpha_fraction": 
0.36936935782432556, "alphanum_fraction": 0.36936935782432556, "avg_line_length": 27.542856216430664, "blob_id": "48585e7c651ec24857c06c237b31a1a660b924b6", "content_id": "83ca10676731833cc196130bd6850310b5bad6a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1998, "license_type": "no_license", "max_line_length": 79, "num_lines": 70, "path": "/incoq/compiler/set/setmatch.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "###############################################################################\n# setmatch.py #\n# Author: Jon Brandvein #\n###############################################################################\n\n\"\"\"Tuple pattern matching.\"\"\"\n\n\n__all__ = [\n 'make_bindmatch',\n 'make_tuplematch',\n]\n\n\nimport incoq.compiler.incast as L\n\n\ndef make_bindmatch(rel, mask, bvars, uvars, body):\n if mask.is_allbound and not mask.has_equalities:\n template = L.trim('''\n if BVARS in REL:\n BODY\n ''')\n \n elif mask.is_allunbound and not mask.has_wildcards:\n template = L.trim('''\n for UVARS in REL:\n BODY\n ''')\n \n else:\n template = L.trim('''\n for UVARS in setmatch(REL, MASK, BVARS):\n BODY\n ''')\n \n code = L.pc(template, subst={'REL': L.ln(rel),\n 'MASK': mask.make_node(),\n 'BVARS': L.tuplify(bvars),\n 'UVARS': L.tuplify(uvars, lval=True),\n '<c>BODY': body})\n return code\n\n\ndef make_tuplematch(val, mask, bvars, uvars, body):\n if mask.is_allbound:\n template = L.trim('''\n if BVARS == VAL:\n BODY\n ''')\n \n elif mask.is_allunbound and not mask.has_wildcards:\n template = L.trim('''\n UVARS = VAL\n BODY\n ''')\n \n else:\n template = L.trim('''\n for UVARS in setmatch({VAL}, MASK, BVARS):\n BODY\n ''')\n \n code = L.pc(template,\n subst={'VAL': val,\n 'MASK': mask.make_node(),\n 'BVARS': L.tuplify(bvars),\n 'UVARS': L.tuplify(uvars, lval=True),\n '<c>BODY': body})\n return code\n" }, { "alpha_fraction": 0.46872514486312866, "alphanum_fraction": 0.5354130268096924, "avg_line_length": 35.748538970947266, "blob_id": "3d0bcdd571e002bd2e8068ec63f2d385ba3b3f9c", "content_id": "0064f3d27b7a3a3faea76e7c4085a9c8cf9e7f9a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6283, "license_type": "no_license", "max_line_length": 94, "num_lines": 171, "path": "/incoq/tests/programs/aggr/rewrite_out.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from incoq.runtime import *\n# Comp1 := {(R, _e) : R in _U_Comp1, (R, _e) in _M}\n# Aggr1 := sum(DEMQUERY(Comp1, [R], setmatch(Comp1, 'bu', R)), None)\n_m_Comp1_out = Map()\ndef _maint__m_Comp1_out_add(_e):\n (v11_1, v11_2) = _e\n if (v11_1 not in _m_Comp1_out):\n _m_Comp1_out[v11_1] = set()\n _m_Comp1_out[v11_1].add(v11_2)\n\ndef _maint__m_Comp1_out_remove(_e):\n (v12_1, v12_2) = _e\n _m_Comp1_out[v12_1].remove(v12_2)\n if (len(_m_Comp1_out[v12_1]) == 0):\n del _m_Comp1_out[v12_1]\n\n_m_Aggr1_out = Map()\ndef _maint__m_Aggr1_out_add(_e):\n (v9_1, v9_2) = _e\n if (v9_1 not in _m_Aggr1_out):\n _m_Aggr1_out[v9_1] = set()\n _m_Aggr1_out[v9_1].add(v9_2)\n\ndef _maint__m_Aggr1_out_remove(_e):\n (v10_1, v10_2) = _e\n _m_Aggr1_out[v10_1].remove(v10_2)\n if (len(_m_Aggr1_out[v10_1]) == 0):\n del _m_Aggr1_out[v10_1]\n\ndef _maint_Aggr1_add(_e):\n (v5_v1, v5_v2) = _e\n if (v5_v1 in _U_Aggr1):\n v5_val = _m_Aggr1_out.singlelookup(v5_v1)\n v5_val = (v5_val + v5_v2)\n v5_1 = v5_v1\n v5_elem = _m_Aggr1_out.singlelookup(v5_v1)\n # Begin maint _m_Aggr1_out before \"Aggr1.remove((v5_1, 
v5_elem))\"\n _maint__m_Aggr1_out_remove((v5_1, v5_elem))\n # End maint _m_Aggr1_out before \"Aggr1.remove((v5_1, v5_elem))\"\n # Begin maint _m_Aggr1_out after \"Aggr1.add((v5_1, v5_val))\"\n _maint__m_Aggr1_out_add((v5_1, v5_val))\n # End maint _m_Aggr1_out after \"Aggr1.add((v5_1, v5_val))\"\n\ndef _maint_Aggr1_remove(_e):\n (v6_v1, v6_v2) = _e\n if (v6_v1 in _U_Aggr1):\n v6_val = _m_Aggr1_out.singlelookup(v6_v1)\n v6_val = (v6_val - v6_v2)\n v6_1 = v6_v1\n v6_elem = _m_Aggr1_out.singlelookup(v6_v1)\n # Begin maint _m_Aggr1_out before \"Aggr1.remove((v6_1, v6_elem))\"\n _maint__m_Aggr1_out_remove((v6_1, v6_elem))\n # End maint _m_Aggr1_out before \"Aggr1.remove((v6_1, v6_elem))\"\n # Begin maint _m_Aggr1_out after \"Aggr1.add((v6_1, v6_val))\"\n _maint__m_Aggr1_out_add((v6_1, v6_val))\n # End maint _m_Aggr1_out after \"Aggr1.add((v6_1, v6_val))\"\n\n_U_Aggr1 = RCSet()\n_UEXT_Aggr1 = Set()\ndef demand_Aggr1(R):\n \"sum(DEMQUERY(Comp1, [R], setmatch(Comp1, 'bu', R)), None)\"\n if (R not in _U_Aggr1):\n _U_Aggr1.add(R)\n # Begin maint Aggr1 after \"_U_Aggr1.add(R)\"\n v7_val = 0\n for v7_elem in (_m_Comp1_out[R] if (R in _m_Comp1_out) else set()):\n v7_val = (v7_val + v7_elem)\n v7_1 = R\n # Begin maint _m_Aggr1_out after \"Aggr1.add((v7_1, v7_val))\"\n _maint__m_Aggr1_out_add((v7_1, v7_val))\n # End maint _m_Aggr1_out after \"Aggr1.add((v7_1, v7_val))\"\n demand_Comp1(R)\n # End maint Aggr1 after \"_U_Aggr1.add(R)\"\n else:\n _U_Aggr1.incref(R)\n\ndef undemand_Aggr1(R):\n \"sum(DEMQUERY(Comp1, [R], setmatch(Comp1, 'bu', R)), None)\"\n if (_U_Aggr1.getref(R) == 1):\n # Begin maint Aggr1 before \"_U_Aggr1.remove(R)\"\n undemand_Comp1(R)\n v8_1 = R\n v8_elem = _m_Aggr1_out.singlelookup(R)\n # Begin maint _m_Aggr1_out before \"Aggr1.remove((v8_1, v8_elem))\"\n _maint__m_Aggr1_out_remove((v8_1, v8_elem))\n # End maint _m_Aggr1_out before \"Aggr1.remove((v8_1, v8_elem))\"\n # End maint Aggr1 before \"_U_Aggr1.remove(R)\"\n _U_Aggr1.remove(R)\n else:\n _U_Aggr1.decref(R)\n\ndef query_Aggr1(R):\n \"sum(DEMQUERY(Comp1, [R], setmatch(Comp1, 'bu', R)), None)\"\n if (R not in _UEXT_Aggr1):\n _UEXT_Aggr1.add(R)\n demand_Aggr1(R)\n return True\n\ndef _maint_Comp1__U_Comp1_add(_e):\n # Iterate {(v1_R, v1__e) : v1_R in deltamatch(_U_Comp1, 'b', _e, 1), (v1_R, v1__e) in _M}\n v1_R = _e\n if isinstance(v1_R, Set):\n for v1__e in v1_R:\n # Begin maint _m_Comp1_out after \"Comp1.add((v1_R, v1__e))\"\n _maint__m_Comp1_out_add((v1_R, v1__e))\n # End maint _m_Comp1_out after \"Comp1.add((v1_R, v1__e))\"\n # Begin maint Aggr1 after \"Comp1.add((v1_R, v1__e))\"\n _maint_Aggr1_add((v1_R, v1__e))\n # End maint Aggr1 after \"Comp1.add((v1_R, v1__e))\"\n\ndef _maint_Comp1__U_Comp1_remove(_e):\n # Iterate {(v2_R, v2__e) : v2_R in deltamatch(_U_Comp1, 'b', _e, 1), (v2_R, v2__e) in _M}\n v2_R = _e\n if isinstance(v2_R, Set):\n for v2__e in v2_R:\n # Begin maint Aggr1 before \"Comp1.remove((v2_R, v2__e))\"\n _maint_Aggr1_remove((v2_R, v2__e))\n # End maint Aggr1 before \"Comp1.remove((v2_R, v2__e))\"\n # Begin maint _m_Comp1_out before \"Comp1.remove((v2_R, v2__e))\"\n _maint__m_Comp1_out_remove((v2_R, v2__e))\n # End maint _m_Comp1_out before \"Comp1.remove((v2_R, v2__e))\"\n\ndef _maint_Comp1__M_add(_e):\n # Iterate {(v3_R, v3__e) : v3_R in _U_Comp1, (v3_R, v3__e) in deltamatch(_M, 'bb', _e, 1)}\n (v3_R, v3__e) = _e\n if (v3_R in _U_Comp1):\n # Begin maint _m_Comp1_out after \"Comp1.add((v3_R, v3__e))\"\n _maint__m_Comp1_out_add((v3_R, v3__e))\n # End maint _m_Comp1_out after \"Comp1.add((v3_R, v3__e))\"\n # Begin 
maint Aggr1 after \"Comp1.add((v3_R, v3__e))\"\n _maint_Aggr1_add((v3_R, v3__e))\n # End maint Aggr1 after \"Comp1.add((v3_R, v3__e))\"\n\n_U_Comp1 = RCSet()\n_UEXT_Comp1 = Set()\ndef demand_Comp1(R):\n '{(R, _e) : R in _U_Comp1, (R, _e) in _M}'\n if (R not in _U_Comp1):\n _U_Comp1.add(R)\n # Begin maint Comp1 after \"_U_Comp1.add(R)\"\n _maint_Comp1__U_Comp1_add(R)\n # End maint Comp1 after \"_U_Comp1.add(R)\"\n else:\n _U_Comp1.incref(R)\n\ndef undemand_Comp1(R):\n '{(R, _e) : R in _U_Comp1, (R, _e) in _M}'\n if (_U_Comp1.getref(R) == 1):\n # Begin maint Comp1 before \"_U_Comp1.remove(R)\"\n _maint_Comp1__U_Comp1_remove(R)\n # End maint Comp1 before \"_U_Comp1.remove(R)\"\n _U_Comp1.remove(R)\n else:\n _U_Comp1.decref(R)\n\ndef query_Comp1(R):\n '{(R, _e) : R in _U_Comp1, (R, _e) in _M}'\n if (R not in _UEXT_Comp1):\n _UEXT_Comp1.add(R)\n demand_Comp1(R)\n return True\n\nR = Set()\nR.add(1)\n# Begin maint Comp1 after \"_M.add((R, 1))\"\n_maint_Comp1__M_add((R, 1))\n# End maint Comp1 after \"_M.add((R, 1))\"\nprint((query_Aggr1(R) and _m_Aggr1_out.singlelookup(R)))\nprint(sum({1, 2}))\nprint(sum([1, 2]))\nprint(sum(({1: 1}[1], 2)))" }, { "alpha_fraction": 0.5061983466148376, "alphanum_fraction": 0.5247933864593506, "avg_line_length": 13.666666984558105, "blob_id": "0fc4f5bbaaa4dd297278b77db240986cd59e37d8", "content_id": "846cc54100e344b65be3741c1318164e9da3a022", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 484, "license_type": "no_license", "max_line_length": 36, "num_lines": 33, "path": "/incoq/tests/programs/objcomp/inc_in.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "# Basic object-set comprehension.\n\nfrom incoq.runtime import *\n\nOPTIONS(\n obj_domain = True,\n)\n\n# Test non-transformation of _add().\nN = Set()\nfor i in range(1, 5):\n N._add(i)\n\ns1 = Set()\ns2 = Set()\nfor i in N:\n o = Obj()\n o.i = i\n if i % 2:\n s1.add(o)\n else:\n s2.add(o)\n\nQUERYOPTIONS(\n '{o.i for o in s}',\n params = ['s'],\n uset_mode = 'none',\n impl = 'inc',\n)\ns = s1\nprint(sorted({o.i for o in s}))\ns = s2\nprint(sorted({o.i for o in s}))\n" }, { "alpha_fraction": 0.4425523281097412, "alphanum_fraction": 0.5120705366134644, "avg_line_length": 39.918033599853516, "blob_id": "988e05c941b5e2c09c066774e4915b0f2e41ff7f", "content_id": "6045ccca367ebee4223aa848f005b0bedb36dd04", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9983, "license_type": "no_license", "max_line_length": 169, "num_lines": 244, "path": "/incoq/tests/programs/deminc/obj_out.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from incoq.runtime import *\n# Comp1 := {(S, x_a) : S in _U_Comp1, (S, x) in _M, (x, x_a) in _F_a}\n# Comp1_TS := {S : S in _U_Comp1}\n# Comp1_d_M := {(S, x) : S in Comp1_TS, (S, x) in _M}\n# Comp1_Tx := {x : (S, x) in Comp1_d_M}\n# Comp1_d_F_a := {(x, x_a) : x in Comp1_Tx, (x, x_a) in _F_a}\n_m_Comp1_out = Map()\ndef _maint__m_Comp1_out_add(_e):\n (v21_1, v21_2) = _e\n if (v21_1 not in _m_Comp1_out):\n _m_Comp1_out[v21_1] = set()\n _m_Comp1_out[v21_1].add(v21_2)\n\ndef _maint__m_Comp1_out_remove(_e):\n (v22_1, v22_2) = _e\n _m_Comp1_out[v22_1].remove(v22_2)\n if (len(_m_Comp1_out[v22_1]) == 0):\n del _m_Comp1_out[v22_1]\n\n_m_Comp1_d_M_in = Map()\ndef _maint__m_Comp1_d_M_in_add(_e):\n (v19_1, v19_2) = _e\n if (v19_2 not in _m_Comp1_d_M_in):\n _m_Comp1_d_M_in[v19_2] = set()\n _m_Comp1_d_M_in[v19_2].add(v19_1)\n\ndef 
_maint__m_Comp1_d_M_in_remove(_e):\n (v20_1, v20_2) = _e\n _m_Comp1_d_M_in[v20_2].remove(v20_1)\n if (len(_m_Comp1_d_M_in[v20_2]) == 0):\n del _m_Comp1_d_M_in[v20_2]\n\nComp1_d_F_a = RCSet()\ndef _maint_Comp1_d_F_a_Comp1_Tx_add(_e):\n # Iterate {(v15_x, v15_x_a) : v15_x in deltamatch(Comp1_Tx, 'b', _e, 1), (v15_x, v15_x_a) in _F_a}\n v15_x = _e\n if hasattr(v15_x, 'a'):\n v15_x_a = v15_x.a\n Comp1_d_F_a.add((v15_x, v15_x_a))\n\ndef _maint_Comp1_d_F_a_Comp1_Tx_remove(_e):\n # Iterate {(v16_x, v16_x_a) : v16_x in deltamatch(Comp1_Tx, 'b', _e, 1), (v16_x, v16_x_a) in _F_a}\n v16_x = _e\n if hasattr(v16_x, 'a'):\n v16_x_a = v16_x.a\n Comp1_d_F_a.remove((v16_x, v16_x_a))\n\ndef _maint_Comp1_d_F_a__F_a_add(_e):\n # Iterate {(v17_x, v17_x_a) : v17_x in Comp1_Tx, (v17_x, v17_x_a) in deltamatch(_F_a, 'bb', _e, 1)}\n (v17_x, v17_x_a) = _e\n if (v17_x in Comp1_Tx):\n Comp1_d_F_a.add((v17_x, v17_x_a))\n\nComp1_Tx = RCSet()\ndef _maint_Comp1_Tx_Comp1_d_M_add(_e):\n # Iterate {(v13_S, v13_x) : (v13_S, v13_x) in deltamatch(Comp1_d_M, 'bb', _e, 1)}\n (v13_S, v13_x) = _e\n if (v13_x not in Comp1_Tx):\n Comp1_Tx.add(v13_x)\n # Begin maint Comp1_d_F_a after \"Comp1_Tx.add(v13_x)\"\n _maint_Comp1_d_F_a_Comp1_Tx_add(v13_x)\n # End maint Comp1_d_F_a after \"Comp1_Tx.add(v13_x)\"\n else:\n Comp1_Tx.incref(v13_x)\n\ndef _maint_Comp1_Tx_Comp1_d_M_remove(_e):\n # Iterate {(v14_S, v14_x) : (v14_S, v14_x) in deltamatch(Comp1_d_M, 'bb', _e, 1)}\n (v14_S, v14_x) = _e\n if (Comp1_Tx.getref(v14_x) == 1):\n # Begin maint Comp1_d_F_a before \"Comp1_Tx.remove(v14_x)\"\n _maint_Comp1_d_F_a_Comp1_Tx_remove(v14_x)\n # End maint Comp1_d_F_a before \"Comp1_Tx.remove(v14_x)\"\n Comp1_Tx.remove(v14_x)\n else:\n Comp1_Tx.decref(v14_x)\n\nComp1_d_M = RCSet()\ndef _maint_Comp1_d_M_Comp1_TS_add(_e):\n # Iterate {(v9_S, v9_x) : v9_S in deltamatch(Comp1_TS, 'b', _e, 1), (v9_S, v9_x) in _M}\n v9_S = _e\n if isinstance(v9_S, Set):\n for v9_x in v9_S:\n Comp1_d_M.add((v9_S, v9_x))\n # Begin maint _m_Comp1_d_M_in after \"Comp1_d_M.add((v9_S, v9_x))\"\n _maint__m_Comp1_d_M_in_add((v9_S, v9_x))\n # End maint _m_Comp1_d_M_in after \"Comp1_d_M.add((v9_S, v9_x))\"\n # Begin maint Comp1_Tx after \"Comp1_d_M.add((v9_S, v9_x))\"\n _maint_Comp1_Tx_Comp1_d_M_add((v9_S, v9_x))\n # End maint Comp1_Tx after \"Comp1_d_M.add((v9_S, v9_x))\"\n\ndef _maint_Comp1_d_M_Comp1_TS_remove(_e):\n # Iterate {(v10_S, v10_x) : v10_S in deltamatch(Comp1_TS, 'b', _e, 1), (v10_S, v10_x) in _M}\n v10_S = _e\n if isinstance(v10_S, Set):\n for v10_x in v10_S:\n # Begin maint Comp1_Tx before \"Comp1_d_M.remove((v10_S, v10_x))\"\n _maint_Comp1_Tx_Comp1_d_M_remove((v10_S, v10_x))\n # End maint Comp1_Tx before \"Comp1_d_M.remove((v10_S, v10_x))\"\n # Begin maint _m_Comp1_d_M_in before \"Comp1_d_M.remove((v10_S, v10_x))\"\n _maint__m_Comp1_d_M_in_remove((v10_S, v10_x))\n # End maint _m_Comp1_d_M_in before \"Comp1_d_M.remove((v10_S, v10_x))\"\n Comp1_d_M.remove((v10_S, v10_x))\n\ndef _maint_Comp1_d_M__M_add(_e):\n # Iterate {(v11_S, v11_x) : v11_S in Comp1_TS, (v11_S, v11_x) in deltamatch(_M, 'bb', _e, 1)}\n (v11_S, v11_x) = _e\n if (v11_S in Comp1_TS):\n Comp1_d_M.add((v11_S, v11_x))\n # Begin maint _m_Comp1_d_M_in after \"Comp1_d_M.add((v11_S, v11_x))\"\n _maint__m_Comp1_d_M_in_add((v11_S, v11_x))\n # End maint _m_Comp1_d_M_in after \"Comp1_d_M.add((v11_S, v11_x))\"\n # Begin maint Comp1_Tx after \"Comp1_d_M.add((v11_S, v11_x))\"\n _maint_Comp1_Tx_Comp1_d_M_add((v11_S, v11_x))\n # End maint Comp1_Tx after \"Comp1_d_M.add((v11_S, v11_x))\"\n\nComp1_TS = RCSet()\ndef 
_maint_Comp1_TS__U_Comp1_add(_e):\n # Iterate {v7_S : v7_S in deltamatch(_U_Comp1, 'b', _e, 1)}\n v7_S = _e\n Comp1_TS.add(v7_S)\n # Begin maint Comp1_d_M after \"Comp1_TS.add(v7_S)\"\n _maint_Comp1_d_M_Comp1_TS_add(v7_S)\n # End maint Comp1_d_M after \"Comp1_TS.add(v7_S)\"\n\ndef _maint_Comp1_TS__U_Comp1_remove(_e):\n # Iterate {v8_S : v8_S in deltamatch(_U_Comp1, 'b', _e, 1)}\n v8_S = _e\n # Begin maint Comp1_d_M before \"Comp1_TS.remove(v8_S)\"\n _maint_Comp1_d_M_Comp1_TS_remove(v8_S)\n # End maint Comp1_d_M before \"Comp1_TS.remove(v8_S)\"\n Comp1_TS.remove(v8_S)\n\nComp1 = RCSet()\ndef _maint_Comp1__U_Comp1_add(_e):\n # Iterate {(v1_S, v1_x, v1_x_a) : v1_S in deltamatch(_U_Comp1, 'b', _e, 1), (v1_S, v1_x) in _M, (v1_x, v1_x_a) in _F_a}\n v1_S = _e\n if isinstance(v1_S, Set):\n for v1_x in v1_S:\n if hasattr(v1_x, 'a'):\n v1_x_a = v1_x.a\n if ((v1_S, v1_x_a) not in Comp1):\n Comp1.add((v1_S, v1_x_a))\n # Begin maint _m_Comp1_out after \"Comp1.add((v1_S, v1_x_a))\"\n _maint__m_Comp1_out_add((v1_S, v1_x_a))\n # End maint _m_Comp1_out after \"Comp1.add((v1_S, v1_x_a))\"\n else:\n Comp1.incref((v1_S, v1_x_a))\n\ndef _maint_Comp1__U_Comp1_remove(_e):\n # Iterate {(v2_S, v2_x, v2_x_a) : v2_S in deltamatch(_U_Comp1, 'b', _e, 1), (v2_S, v2_x) in _M, (v2_x, v2_x_a) in _F_a}\n v2_S = _e\n if isinstance(v2_S, Set):\n for v2_x in v2_S:\n if hasattr(v2_x, 'a'):\n v2_x_a = v2_x.a\n if (Comp1.getref((v2_S, v2_x_a)) == 1):\n # Begin maint _m_Comp1_out before \"Comp1.remove((v2_S, v2_x_a))\"\n _maint__m_Comp1_out_remove((v2_S, v2_x_a))\n # End maint _m_Comp1_out before \"Comp1.remove((v2_S, v2_x_a))\"\n Comp1.remove((v2_S, v2_x_a))\n else:\n Comp1.decref((v2_S, v2_x_a))\n\ndef _maint_Comp1__M_add(_e):\n # Iterate {(v3_S, v3_x, v3_x_a) : v3_S in _U_Comp1, (v3_S, v3_x) in deltamatch(Comp1_d_M, 'bb', _e, 1), (v3_S, v3_x) in Comp1_d_M, (v3_x, v3_x_a) in _F_a}\n (v3_S, v3_x) = _e\n if (v3_S in _U_Comp1):\n if ((v3_S, v3_x) in Comp1_d_M):\n if hasattr(v3_x, 'a'):\n v3_x_a = v3_x.a\n if ((v3_S, v3_x_a) not in Comp1):\n Comp1.add((v3_S, v3_x_a))\n # Begin maint _m_Comp1_out after \"Comp1.add((v3_S, v3_x_a))\"\n _maint__m_Comp1_out_add((v3_S, v3_x_a))\n # End maint _m_Comp1_out after \"Comp1.add((v3_S, v3_x_a))\"\n else:\n Comp1.incref((v3_S, v3_x_a))\n\ndef _maint_Comp1__F_a_add(_e):\n # Iterate {(v5_S, v5_x, v5_x_a) : v5_S in _U_Comp1, (v5_S, v5_x) in Comp1_d_M, (v5_x, v5_x_a) in deltamatch(Comp1_d_F_a, 'bb', _e, 1), (v5_x, v5_x_a) in Comp1_d_F_a}\n (v5_x, v5_x_a) = _e\n if ((v5_x, v5_x_a) in Comp1_d_F_a):\n for v5_S in (_m_Comp1_d_M_in[v5_x] if (v5_x in _m_Comp1_d_M_in) else set()):\n if (v5_S in _U_Comp1):\n if ((v5_S, v5_x_a) not in Comp1):\n Comp1.add((v5_S, v5_x_a))\n # Begin maint _m_Comp1_out after \"Comp1.add((v5_S, v5_x_a))\"\n _maint__m_Comp1_out_add((v5_S, v5_x_a))\n # End maint _m_Comp1_out after \"Comp1.add((v5_S, v5_x_a))\"\n else:\n Comp1.incref((v5_S, v5_x_a))\n\n_U_Comp1 = RCSet()\n_UEXT_Comp1 = Set()\ndef demand_Comp1(S):\n '{(S, x_a) : S in _U_Comp1, (S, x) in _M, (x, x_a) in _F_a}'\n if (S not in _U_Comp1):\n _U_Comp1.add(S)\n # Begin maint Comp1_TS after \"_U_Comp1.add(S)\"\n _maint_Comp1_TS__U_Comp1_add(S)\n # End maint Comp1_TS after \"_U_Comp1.add(S)\"\n # Begin maint Comp1 after \"_U_Comp1.add(S)\"\n _maint_Comp1__U_Comp1_add(S)\n # End maint Comp1 after \"_U_Comp1.add(S)\"\n else:\n _U_Comp1.incref(S)\n\ndef undemand_Comp1(S):\n '{(S, x_a) : S in _U_Comp1, (S, x) in _M, (x, x_a) in _F_a}'\n if (_U_Comp1.getref(S) == 1):\n # Begin maint Comp1 before \"_U_Comp1.remove(S)\"\n 
_maint_Comp1__U_Comp1_remove(S)\n # End maint Comp1 before \"_U_Comp1.remove(S)\"\n # Begin maint Comp1_TS before \"_U_Comp1.remove(S)\"\n _maint_Comp1_TS__U_Comp1_remove(S)\n # End maint Comp1_TS before \"_U_Comp1.remove(S)\"\n _U_Comp1.remove(S)\n else:\n _U_Comp1.decref(S)\n\ndef query_Comp1(S):\n '{(S, x_a) : S in _U_Comp1, (S, x) in _M, (x, x_a) in _F_a}'\n if (S not in _UEXT_Comp1):\n _UEXT_Comp1.add(S)\n demand_Comp1(S)\n return True\n\nS = Set()\no = Obj()\no.a = 1\n# Begin maint Comp1_d_F_a after \"_F_a.add((o, 1))\"\n_maint_Comp1_d_F_a__F_a_add((o, 1))\n# End maint Comp1_d_F_a after \"_F_a.add((o, 1))\"\n# Begin maint Comp1 after \"_F_a.add((o, 1))\"\n_maint_Comp1__F_a_add((o, 1))\n# End maint Comp1 after \"_F_a.add((o, 1))\"\nS.add(o)\n# Begin maint Comp1_d_M after \"_M.add((S, o))\"\n_maint_Comp1_d_M__M_add((S, o))\n# End maint Comp1_d_M after \"_M.add((S, o))\"\n# Begin maint Comp1 after \"_M.add((S, o))\"\n_maint_Comp1__M_add((S, o))\n# End maint Comp1 after \"_M.add((S, o))\"\nprint(sorted((query_Comp1(S) and (_m_Comp1_out[S] if (S in _m_Comp1_out) else set()))))" }, { "alpha_fraction": 0.47089630365371704, "alphanum_fraction": 0.5379320383071899, "avg_line_length": 35.72527313232422, "blob_id": "41196314e2ab52006fa1671079cc929b85adcda3", "content_id": "b6ee6563b0fd5d6ed63baf59c76c127e91778f2d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6683, "license_type": "no_license", "max_line_length": 91, "num_lines": 182, "path": "/incoq/tests/programs/aggr/lru_out.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from incoq.runtime import *\n# Comp1 := {(x, y) : x in _U_Comp1, (x, y) in E}\n# Aggr1 := sum(DEMQUERY(Comp1, [x], setmatch(Comp1, 'bu', x)), None)\n_m_E_out = Map()\ndef _maint__m_E_out_add(_e):\n (v13_1, v13_2) = _e\n if (v13_1 not in _m_E_out):\n _m_E_out[v13_1] = set()\n _m_E_out[v13_1].add(v13_2)\n\n_m_Comp1_out = Map()\ndef _maint__m_Comp1_out_add(_e):\n (v11_1, v11_2) = _e\n if (v11_1 not in _m_Comp1_out):\n _m_Comp1_out[v11_1] = set()\n _m_Comp1_out[v11_1].add(v11_2)\n\ndef _maint__m_Comp1_out_remove(_e):\n (v12_1, v12_2) = _e\n _m_Comp1_out[v12_1].remove(v12_2)\n if (len(_m_Comp1_out[v12_1]) == 0):\n del _m_Comp1_out[v12_1]\n\n_m_Aggr1_out = Map()\ndef _maint__m_Aggr1_out_add(_e):\n (v9_1, v9_2) = _e\n if (v9_1 not in _m_Aggr1_out):\n _m_Aggr1_out[v9_1] = set()\n _m_Aggr1_out[v9_1].add(v9_2)\n\ndef _maint__m_Aggr1_out_remove(_e):\n (v10_1, v10_2) = _e\n _m_Aggr1_out[v10_1].remove(v10_2)\n if (len(_m_Aggr1_out[v10_1]) == 0):\n del _m_Aggr1_out[v10_1]\n\ndef _maint_Aggr1_add(_e):\n (v5_v1, v5_v2) = _e\n if (v5_v1 in _U_Aggr1):\n v5_val = _m_Aggr1_out.singlelookup(v5_v1)\n v5_val = (v5_val + v5_v2)\n v5_1 = v5_v1\n v5_elem = _m_Aggr1_out.singlelookup(v5_v1)\n # Begin maint _m_Aggr1_out before \"Aggr1.remove((v5_1, v5_elem))\"\n _maint__m_Aggr1_out_remove((v5_1, v5_elem))\n # End maint _m_Aggr1_out before \"Aggr1.remove((v5_1, v5_elem))\"\n # Begin maint _m_Aggr1_out after \"Aggr1.add((v5_1, v5_val))\"\n _maint__m_Aggr1_out_add((v5_1, v5_val))\n # End maint _m_Aggr1_out after \"Aggr1.add((v5_1, v5_val))\"\n\ndef _maint_Aggr1_remove(_e):\n (v6_v1, v6_v2) = _e\n if (v6_v1 in _U_Aggr1):\n v6_val = _m_Aggr1_out.singlelookup(v6_v1)\n v6_val = (v6_val - v6_v2)\n v6_1 = v6_v1\n v6_elem = _m_Aggr1_out.singlelookup(v6_v1)\n # Begin maint _m_Aggr1_out before \"Aggr1.remove((v6_1, v6_elem))\"\n _maint__m_Aggr1_out_remove((v6_1, v6_elem))\n # End maint _m_Aggr1_out before \"Aggr1.remove((v6_1, 
v6_elem))\"\n # Begin maint _m_Aggr1_out after \"Aggr1.add((v6_1, v6_val))\"\n _maint__m_Aggr1_out_add((v6_1, v6_val))\n # End maint _m_Aggr1_out after \"Aggr1.add((v6_1, v6_val))\"\n\n_U_Aggr1 = RCSet()\n_UEXT_Aggr1 = LRUSet()\ndef demand_Aggr1(x):\n \"sum(DEMQUERY(Comp1, [x], setmatch(Comp1, 'bu', x)), None)\"\n if (x not in _U_Aggr1):\n _U_Aggr1.add(x)\n # Begin maint Aggr1 after \"_U_Aggr1.add(x)\"\n v7_val = 0\n for v7_elem in (_m_Comp1_out[x] if (x in _m_Comp1_out) else set()):\n v7_val = (v7_val + v7_elem)\n v7_1 = x\n # Begin maint _m_Aggr1_out after \"Aggr1.add((v7_1, v7_val))\"\n _maint__m_Aggr1_out_add((v7_1, v7_val))\n # End maint _m_Aggr1_out after \"Aggr1.add((v7_1, v7_val))\"\n demand_Comp1(x)\n # End maint Aggr1 after \"_U_Aggr1.add(x)\"\n else:\n _U_Aggr1.incref(x)\n\ndef undemand_Aggr1(x):\n \"sum(DEMQUERY(Comp1, [x], setmatch(Comp1, 'bu', x)), None)\"\n if (_U_Aggr1.getref(x) == 1):\n # Begin maint Aggr1 before \"_U_Aggr1.remove(x)\"\n undemand_Comp1(x)\n v8_1 = x\n v8_elem = _m_Aggr1_out.singlelookup(x)\n # Begin maint _m_Aggr1_out before \"Aggr1.remove((v8_1, v8_elem))\"\n _maint__m_Aggr1_out_remove((v8_1, v8_elem))\n # End maint _m_Aggr1_out before \"Aggr1.remove((v8_1, v8_elem))\"\n # End maint Aggr1 before \"_U_Aggr1.remove(x)\"\n _U_Aggr1.remove(x)\n else:\n _U_Aggr1.decref(x)\n\ndef query_Aggr1(x):\n \"sum(DEMQUERY(Comp1, [x], setmatch(Comp1, 'bu', x)), None)\"\n if (x not in _UEXT_Aggr1):\n while (len(_UEXT_Aggr1) >= 2):\n _top_v1 = _top = _UEXT_Aggr1.peek()\n undemand_Aggr1(_top_v1)\n _UEXT_Aggr1.remove(_top)\n _UEXT_Aggr1.add(x)\n demand_Aggr1(x)\n else:\n _UEXT_Aggr1.ping(x)\n return True\n\ndef _maint_Comp1__U_Comp1_add(_e):\n # Iterate {(v1_x, v1_y) : v1_x in deltamatch(_U_Comp1, 'b', _e, 1), (v1_x, v1_y) in E}\n v1_x = _e\n for v1_y in (_m_E_out[v1_x] if (v1_x in _m_E_out) else set()):\n # Begin maint _m_Comp1_out after \"Comp1.add((v1_x, v1_y))\"\n _maint__m_Comp1_out_add((v1_x, v1_y))\n # End maint _m_Comp1_out after \"Comp1.add((v1_x, v1_y))\"\n # Begin maint Aggr1 after \"Comp1.add((v1_x, v1_y))\"\n _maint_Aggr1_add((v1_x, v1_y))\n # End maint Aggr1 after \"Comp1.add((v1_x, v1_y))\"\n\ndef _maint_Comp1__U_Comp1_remove(_e):\n # Iterate {(v2_x, v2_y) : v2_x in deltamatch(_U_Comp1, 'b', _e, 1), (v2_x, v2_y) in E}\n v2_x = _e\n for v2_y in (_m_E_out[v2_x] if (v2_x in _m_E_out) else set()):\n # Begin maint Aggr1 before \"Comp1.remove((v2_x, v2_y))\"\n _maint_Aggr1_remove((v2_x, v2_y))\n # End maint Aggr1 before \"Comp1.remove((v2_x, v2_y))\"\n # Begin maint _m_Comp1_out before \"Comp1.remove((v2_x, v2_y))\"\n _maint__m_Comp1_out_remove((v2_x, v2_y))\n # End maint _m_Comp1_out before \"Comp1.remove((v2_x, v2_y))\"\n\ndef _maint_Comp1_E_add(_e):\n # Iterate {(v3_x, v3_y) : v3_x in _U_Comp1, (v3_x, v3_y) in deltamatch(E, 'bb', _e, 1)}\n (v3_x, v3_y) = _e\n if (v3_x in _U_Comp1):\n # Begin maint _m_Comp1_out after \"Comp1.add((v3_x, v3_y))\"\n _maint__m_Comp1_out_add((v3_x, v3_y))\n # End maint _m_Comp1_out after \"Comp1.add((v3_x, v3_y))\"\n # Begin maint Aggr1 after \"Comp1.add((v3_x, v3_y))\"\n _maint_Aggr1_add((v3_x, v3_y))\n # End maint Aggr1 after \"Comp1.add((v3_x, v3_y))\"\n\n_U_Comp1 = RCSet()\n_UEXT_Comp1 = Set()\ndef demand_Comp1(x):\n '{(x, y) : x in _U_Comp1, (x, y) in E}'\n if (x not in _U_Comp1):\n _U_Comp1.add(x)\n # Begin maint Comp1 after \"_U_Comp1.add(x)\"\n _maint_Comp1__U_Comp1_add(x)\n # End maint Comp1 after \"_U_Comp1.add(x)\"\n else:\n _U_Comp1.incref(x)\n\ndef undemand_Comp1(x):\n '{(x, y) : x in _U_Comp1, (x, y) in E}'\n if 
(_U_Comp1.getref(x) == 1):\n # Begin maint Comp1 before \"_U_Comp1.remove(x)\"\n _maint_Comp1__U_Comp1_remove(x)\n # End maint Comp1 before \"_U_Comp1.remove(x)\"\n _U_Comp1.remove(x)\n else:\n _U_Comp1.decref(x)\n\ndef query_Comp1(x):\n '{(x, y) : x in _U_Comp1, (x, y) in E}'\n if (x not in _UEXT_Comp1):\n _UEXT_Comp1.add(x)\n demand_Comp1(x)\n return True\n\nfor e in [(1, 2), (1, 3), (2, 4), (2, 10), (3, 1)]:\n # Begin maint _m_E_out after \"E.add(e)\"\n _maint__m_E_out_add(e)\n # End maint _m_E_out after \"E.add(e)\"\n # Begin maint Comp1 after \"E.add(e)\"\n _maint_Comp1_E_add(e)\n # End maint Comp1 after \"E.add(e)\"\nfor x in [1, 2, 1, 3]:\n print((query_Aggr1(x) and _m_Aggr1_out.singlelookup(x)))" }, { "alpha_fraction": 0.5699945092201233, "alphanum_fraction": 0.5706380009651184, "avg_line_length": 30.426692962646484, "blob_id": "185be17d0578b96443c6c8c8dca9345c27eeb1b3", "content_id": "9f412071a98b44e6e83e7b9722a62c4f94a0442b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 31081, "license_type": "no_license", "max_line_length": 76, "num_lines": 989, "path": "/incoq/compiler/comp/clause.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Membership and condition clauses, which are the components of joins.\"\"\"\n\n### TODO: Optimize subtract()/augment() to cancel out subclause/augclause\n\n\n__all__ = [\n 'apply_subst_tuple',\n 'inst_wildcards',\n 'vars_from_tuple',\n \n 'Clause',\n \n 'EnumClause',\n 'SubClause',\n 'AugClause',\n 'LookupClause',\n 'SingletonClause',\n 'DeltaClause',\n 'CondClause',\n \n 'ClauseFactory',\n]\n\n\nfrom abc import ABCMeta, abstractmethod, abstractclassmethod\n\nfrom simplestruct import Struct, MetaStruct, TypedField\n\nfrom incoq.util.type import checktype\nfrom incoq.util.seq import elim_duplicates\nimport incoq.compiler.incast as L\nfrom incoq.compiler.set import (Mask, AuxmapSpec, make_bindmatch,\n make_tuplematch)\n\nfrom .order import Rate\n\n\nclass ABCMetaStruct(MetaStruct, ABCMeta):\n pass\n\nclass ABCStruct(Struct, metaclass=ABCMetaStruct):\n pass\n\n\ndef apply_subst_tuple(vars, subst):\n \"\"\"Given a tuple of variables, return the result of applying a\n substitution. The substitution keys are input var names, and the\n values are either output var names, or else callables that take\n in an input var name and produce an output var name.\n \"\"\"\n new_vars = []\n for v in vars:\n newval = subst.get(v, v)\n if not isinstance(newval, str):\n newval = newval(v)\n new_vars.append(newval)\n return tuple(new_vars)\n\ndef inst_wildcards(vars):\n \"\"\"Given a tuple of variables and wildcards, return a tuple where\n wildcards are replaced with successive fresh vars _v1, ... 
_vn.\n    \"\"\"\n    namer = L.NameGenerator('_v{}', 1)\n    vars = apply_subst_tuple(vars, {'_': lambda v: namer.next()})\n    return vars\n\ndef vars_from_tuple(vars):\n    \"\"\"Given a tuple of variables, return a tuple with duplicates\n    and wildcards removed.\n    \"\"\"\n    return elim_duplicates(tuple(v for v in vars if v != '_'))\n\n\nclass Clause(metaclass=ABCMeta):\n    \n    \"\"\"Abstract base class for (unordered) join clauses.\"\"\"\n    \n    class ClauseKind:\n        \"\"\"Enumeration class for KIND constants.\"\"\"\n    KIND_ENUM = ClauseKind()\n    KIND_COND = ClauseKind()\n    \n    kind = None\n    \n    isdelta = False\n    \"\"\"True for clauses that represent the enumerator affected by\n    an update.\n    \"\"\"\n    \n    enumlhs = ()\n    \"\"\"For enumerators, a tuple of the terms on the left-hand-side,\n    in order, including duplicates and wildcards. For conditions,\n    the empty tuple.\n    \"\"\"\n    \n    enumrel = None\n    \"\"\"Name of the iterated relation. None for conditions and for\n    enumerators that are not over (expressions derived from) set\n    variables.\n    \"\"\"\n    \n    vars = ()\n    \"\"\"For enumerators, same as enumvars. For conditions, a tuple\n    of the variables the condition depends on.\n    \"\"\"\n    \n    eqvars = None\n    \"\"\"For a condition that equates variables, a pair of the\n    equated variables. None for other conditions and for\n    enumerators.\n    \"\"\"\n    \n    robust = True\n    \"\"\"True if this enumerator's meaning is independent of context\n    (i.e., relies only on relations, which are global).\n    \"\"\"\n    \n    inc_safe = True\n    \"\"\"True if, provided that this clause is robust, it would be safe\n    to incrementalize a comprehension having this clause.\n    \"\"\"\n    \n    demname = None\n    demparams = ()\n    \"\"\"For an enumerator whose RHS is demand-driven, the corresponding\n    query name and demand parameters.\n    \"\"\"\n    \n    @property\n    def has_demand(self):\n        return self.demname is not None\n    \n    @property\n    def enumvars(self):\n        \"\"\"For enumerators, a tuple of the enumeration variables in\n        the order they occur, omitting duplicates and wildcards. For\n        conditions, the empty tuple.\n        \"\"\"\n        return vars_from_tuple(self.enumlhs)\n    \n    @property\n    def has_wildcards(self):\n        \"\"\"For an enumerator, True iff there are any wildcards on\n        the LHS. For a condition, False.\n        \"\"\"\n        return any(v == '_' for v in self.enumlhs)\n    \n    # The following attributes are enumlhs masks -- tuples of bools\n    # whose positions correspond to entries in enumlhs. For conditions\n    # these are just the empty tuple, since enumlhs is the empty\n    # tuple.\n    \n    @property\n    def pat_mask(self):\n        \"\"\"Enumlhs mask. True means the position is subject to\n        pattern matching. Vars at that position can be renamed.\n        \"\"\"\n        return tuple(True for _ in self.enumlhs)\n    \n    @property\n    def con_mask(self):\n        \"\"\"Enumlhs mask. True means the position is constrained by\n        the enumerator. Vars are considered unconstrained if they\n        only appear at unconstrained positions.\n        \"\"\"\n        return tuple(True for _ in self.enumlhs)\n    \n    @property\n    def tagsin_mask(self):\n        \"\"\"Enumlhs mask. True means this clause can be filtered by\n        a tag over the var at that position.\n        \"\"\"\n        return tuple(True for _ in self.enumlhs)\n    \n    @property\n    def tagsout_mask(self):\n        \"\"\"Enumlhs mask. 
True means this clause can introduce a tag\n over a var at that position.\n \"\"\"\n return tuple(True for _ in self.enumlhs)\n \n # For the above masks, these properties return the vars in\n # enumvars that appear in a position in enumlhs that satisfies\n # the mask.\n \n @property\n def enumvars_tagsin(self):\n return vars_from_tuple(\n v for v, b in zip(self.enumlhs, self.tagsin_mask) if b)\n \n @property\n def enumvars_tagsout(self):\n return vars_from_tuple(\n v for v, b in zip(self.enumlhs, self.tagsout_mask) if b)\n \n def get_domain_constrs(self, prefix):\n \"\"\"For an enumerator with non-None enumrel, return a sequence\n of domain constraints. For all other clauses return an empty\n sequence. Names of enumeration variables in the constraints\n get the supplied prefix.\n \"\"\"\n if self.enumrel is None:\n return ()\n dom = self.enumrel\n \n constrs = []\n \n # If there's only one component on the lhs, this is not\n # (necessarily) a set of singleton tuples, so don't emit\n # tuple-based constraints.\n if len(self.enumlhs) == 1:\n var = self.enumlhs[0]\n if var != '_':\n constrs.append((dom, prefix + var))\n \n else:\n subdoms = [dom + '.' + str(i)\n for i in range(1, len(self.enumlhs) + 1)]\n constr = (dom, tuple(['<T>'] + subdoms))\n constrs.append(constr)\n \n for i, var in enumerate(self.enumlhs, 1):\n if var == '_':\n continue\n constr = (dom + '.' + str(i), prefix + var)\n constrs.append(constr)\n \n return tuple(constrs)\n \n def get_membership_constrs(self):\n \"\"\"Return a sequence of labeled edge triples (x, y, i) meaning\n that x is constrained to be the ith component of a tuple in y.\n If i is None then x itself is an element of y. If enumrel is\n None return an empty sequence.\n \"\"\"\n if self.enumrel is None:\n return ()\n dom = self.enumrel\n \n edges = []\n \n if len(self.enumlhs) == 1:\n var = self.enumlhs[0]\n if var != '_':\n edges.append((var, dom, None))\n \n else:\n for i, var in enumerate(self.enumlhs, 1):\n if var == '_':\n continue\n edges.append((var, dom, i))\n \n return tuple(edges)\n \n @abstractclassmethod\n def from_AST(self, node, factory):\n \"\"\"Construct an instance of this clause from an AST.\n For an enumerator, this should be an Enumerator node of\n a certain form, depending on the subclass. For a condition,\n this is an expression node. Raise TypeError if construction\n from the given AST is not appropriate.\n \n factory is a ClauseFactory (instance or class) that may be\n used to construct other clauses that are needed to construct\n this one.\n \"\"\"\n \n @abstractmethod\n def to_AST(self):\n \"\"\"Return an AST representing this clause. For an enumerator,\n this is an Enumerator node. 
For a condition, this is an\n expression node.\n \"\"\"\n \n def __str__(self):\n clast = self.to_AST()\n s = L.ts(clast).strip()\n if isinstance(clast, L.Enumerator):\n # Get rid of \"for\" at the beginning.\n s = s[s.find(' ') + 1:]\n return s\n \n # User code should call ClauseFactory methods rather than these\n # helpers directly.\n \n def rewrite_subst(self, subst, factory):\n \"\"\"See ClauseFactory.\"\"\"\n clast = self.to_AST()\n if self.kind is Clause.KIND_ENUM:\n lhs = clast.target\n lhs = L.VarRenamer.run(lhs, subst)\n clast = clast._replace(target=lhs)\n else:\n clast = L.VarRenamer.run(clast, subst)\n return factory.from_AST(clast)\n \n def rewrite_lhs(self, subst, factory):\n \"\"\"See ClauseFactory.\"\"\"\n if self.kind is not Clause.KIND_ENUM:\n return self\n clast = self.to_AST()\n lhs = clast.target\n lhs = L.VarRenamer.run(lhs, subst)\n clast = clast._replace(target=lhs)\n return factory.from_AST(clast)\n \n def rewrite_rel(self, rel, factory):\n \"\"\"See ClauseFactory.\"\"\"\n raise TypeError\n \n def subtract_inner(self, excl, factory):\n \"\"\"See ClauseFactory.\"\"\"\n # Default implementation is to use subtract.\n # Must override if this clause is a wrapper\n # around another clause.\n return factory.subtract(self, excl)\n \n def fits_string(self, bindenv, s):\n \"\"\"Return True if s is an informal characterization of this\n clause (under binding environment bindenv) that a user may\n write to refer to it. Useful for letting the user specify\n clause priority overrides.\n \"\"\"\n return False\n \n def needs_filtering(self, bindenv):\n \"\"\"For enumerators, return whether or not a demand-filtered\n version of this clause should be used. For conditions raise\n TypeError.\n \"\"\"\n if self.kind is not Clause.KIND_ENUM:\n raise TypeError\n # By default, use a filter if any tagsin position is not\n # bound.\n return not set(self.enumvars_tagsin).issubset(bindenv)\n \n @abstractmethod\n def rate(self, bindenv):\n \"\"\"For the join heuristic, return a numerical ranking for this\n clause under a binding environment. A binding environment is\n a sequence of names of variables that are considered bound.\n See incoq.comp.order.\n \"\"\"\n \n def get_determined_vars(self, bindenv):\n \"\"\"Return a tuple of enumvars that are functionally determined\n by the union of the given bound vars and the remaining vars in\n this clause that aren't returned. The returned tuple may have\n duplicates and may include the given vars.\n \"\"\"\n return ()\n \n @abstractmethod\n def get_code(self, bindenv, body):\n \"\"\"Return code to run this clause in the way implied by a\n binding environment. 
body is the code that is enclosed in,\n        or follows, this clause.\n        \"\"\"\n\n\nclass EnumClause(Clause, ABCStruct):\n    \n    \"\"\"A normal enumeration (membership constraint) clause\n    over a relation.\n    \"\"\"\n    \n    kind = Clause.KIND_ENUM\n    \n    lhs = TypedField(str, seq=True)\n    \"\"\"Tuple of variables on left-hand side.\"\"\"\n    rel = TypedField(str)\n    \"\"\"Name of iterated relation.\"\"\"\n    \n    @classmethod\n    def from_expr(cls, node):\n        \"\"\"Construct from a membership condition expression of form\n        \n            <vars> in <rel>\n        \n        Note that this is syntactically different from the form used\n        in comprehensions, even though their textual representation\n        in source code is the same.\n        \"\"\"\n        checktype(node, L.AST)\n        \n        left, op, right = L.get_cmp(node)\n        checktype(op, L.In)\n        lhs = L.get_vartuple(left)\n        rel = L.get_name(right)\n        return cls(lhs, rel)\n    \n    @classmethod\n    def from_AST(cls, node, factory):\n        \"\"\"Construct from an Enumerator node of form\n        \n            <vars> in <rel>\n        \n        Alternatively, the rhs may be a setmatch of a rel, where\n        the mask is a lookupmask and the key is a vartuple.\n        \"\"\"\n        checktype(node, L.Enumerator)\n        \n        lhs = L.get_vartuple(node.target)\n        rhs = node.iter\n        \n        if L.is_name(rhs):\n            rel = L.get_name(rhs)\n        \n        elif isinstance(rhs, L.SetMatch) and L.is_vartuple(rhs.key):\n            keyvars = L.get_vartuple(rhs.key)\n            # Make sure we're dealing with a lookupmask and that the\n            # key vars agree with the mask.\n            mask = Mask(rhs.mask)\n            assert mask.is_lookupmask\n            assert mask.lookup_arity == len(keyvars)\n            \n            lhs = keyvars + lhs\n            rel = L.get_name(rhs.target)\n        \n        else:\n            raise TypeError\n        \n        return cls(lhs, rel)\n    \n    def __init__(self, lhs, rel):\n        self.enumlhs = self.lhs\n        self.enumrel = self.rel\n        self.vars = self.enumvars\n    \n    def to_AST(self):\n        return L.Enumerator(L.tuplify(self.lhs, lval=True),\n                            L.ln(self.rel))\n    \n    def rewrite_rel(self, rel, factory):\n        # We specifically construct an EnumClause (not an instance of\n        # a subclass) and rely on the factory to turn that into an\n        # appropriate clause. 
This is to play nice with the object\n # clauses in incoq/obj/objclause.py.\n cl = EnumClause(self.enumlhs, rel)\n clast = cl.to_AST()\n return factory.from_AST(clast)\n \n def fits_string(self, bindenv, s):\n mask = Mask.from_vars(self.lhs, bindenv)\n return s == AuxmapSpec(self.rel, mask).lookup_name\n \n def rate(self, bindenv):\n mask = Mask.from_vars(self.lhs, bindenv)\n if mask.is_allbound:\n return Rate.CONSTANT_MEMBERSHIP\n elif mask.is_allunbound:\n return Rate.NOTPREFERRED\n else:\n return Rate.NORMAL\n \n def get_code(self, bindenv, body):\n mask = Mask.from_vars(self.lhs, bindenv)\n bvars, uvars, _eqs = mask.split_vars(self.lhs)\n return make_bindmatch(self.rel, mask, bvars, uvars, body)\n\n\nclass SubClause(Clause, ABCStruct):\n \n \"\"\"An enumerator that skips over a specified element.\"\"\"\n \n kind = Clause.KIND_ENUM\n \n robust = False\n \n cl = TypedField(Clause)\n \"\"\"Underlying clause.\"\"\"\n excl = TypedField(L.expr)\n \"\"\"Expression whose value is to be excluded.\"\"\"\n \n pat_mask = None\n con_mask = None\n tagsin_mask = None\n tagsout_mask = None\n \n @classmethod\n def from_AST(cls, node, factory):\n \"\"\"Construct from Enumerator node of form\n \n <vars> in <rel> - {<expr>}\n \"\"\"\n checktype(node, L.Enumerator)\n \n rhs, excl = L.get_singsub(node.iter)\n \n innernode = node._replace(iter=rhs)\n innerclause = factory.from_AST(innernode)\n \n return cls(innerclause, excl)\n \n def __init__(self, cl, excl):\n for attr in [\n 'isdelta', 'enumlhs', 'enumrel',\n 'pat_mask', 'con_mask', 'tagsin_mask', 'tagsout_mask',\n 'vars', 'eqvars', 'demname', 'demparams']:\n setattr(self, attr, getattr(cl, attr))\n \n def to_AST(self):\n code = self.cl.to_AST()\n assert isinstance(code, L.Enumerator)\n code = code._replace(iter=L.pe('ITER - {EXCL}',\n subst={'ITER': code.iter,\n 'EXCL': self.excl}))\n return code\n \n def rewrite_rel(self, rel, factory):\n new_cl = self.cl.rewrite_rel(rel, factory)\n return self._replace(cl=new_cl)\n \n def subtract_inner(self, excl, factory):\n new_cl = self.cl.subtract_inner(excl, factory)\n return self._replace(cl=new_cl)\n \n def fits_string(self, mask, s):\n return self.cl.fits_string(mask, s)\n \n def rate(self, bindenv):\n return self.cl.rate(bindenv)\n \n def get_code(self, bindenv, body):\n guard_code = L.pc('''\n if LHS != EXCL:\n BODY\n ''', subst={'LHS': L.tuplify(self.cl.enumlhs),\n 'EXCL': self.excl,\n '<c>BODY': body})\n \n return self.cl.get_code(bindenv, guard_code)\n\n\nclass AugClause(Clause, ABCStruct):\n \n \"\"\"An enumerator that runs for one extra element.\"\"\"\n \n kind = Clause.KIND_ENUM\n \n robust = False\n \n cl = TypedField(Clause)\n \"\"\"Underlying clause.\"\"\"\n extra = TypedField(L.expr)\n \"\"\"Expression whose value is to be added.\"\"\"\n \n pat_mask = None\n con_mask = None\n tagsin_mask = None\n tagsout_mask = None\n \n @classmethod\n def from_AST(cls, node, factory):\n \"\"\"Construct from Enumerator node of form\n \n <vars> in <rel> + {<expr>}\n \"\"\"\n checktype(node, L.Enumerator)\n \n rhs, extra = L.get_singadd(node.iter)\n \n innernode = node._replace(iter=rhs)\n innerclause = factory.from_AST(innernode)\n \n return cls(innerclause, extra)\n \n def __init__(self, cl, extra):\n for attr in [\n 'isdelta', 'enumlhs', 'enumrel',\n 'pat_mask', 'con_mask', 'tagsin_mask', 'tagsout_mask',\n 'vars', 'eqvars', 'demname', 'demparams']:\n setattr(self, attr, getattr(cl, attr))\n \n def to_AST(self):\n code = self.cl.to_AST()\n assert isinstance(code, L.Enumerator)\n code = code._replace(iter=L.pe('ITER 
+ {EXTRA}',\n subst={'ITER': code.iter,\n 'EXTRA': self.extra}))\n return code\n \n def rewrite_rel(self, rel, factory):\n new_cl = self.cl.rewrite_rel(rel, factory)\n return self._replace(cl=new_cl)\n \n def subtract_inner(self, excl, factory):\n new_cl = self.cl.subtract_inner(excl, factory)\n return self._replace(cl=new_cl)\n \n def fits_string(self, mask, s):\n return self.cl.fits_string(mask, s)\n \n def rate(self, bindenv):\n return self.cl.rate(bindenv)\n \n def get_code(self, bindenv, body):\n # Hackish: Rather than add a factory parameter to this method,\n # I'm just going to forego the use of ClauseFactory.bind() and\n # construct SingletonClause directly.\n boundcl = SingletonClause(self.enumlhs, self.extra)\n code = self.cl.get_code(bindenv, body)\n code += boundcl.get_code(bindenv, body)\n return code\n\n\nclass LookupClause(EnumClause, ABCStruct):\n \n \"\"\"An enumerator over a singleton set of an SMLookup node.\n Basically acts like a normal EnumClause, but the forward\n direction takes constant time due to the functional\n dependency from keys to value.\n \"\"\"\n \n lhs = TypedField(str, seq=True)\n \"\"\"Enumeration variables.\"\"\"\n rel = TypedField(str)\n \"\"\"Name of iterated relation.\"\"\"\n \n @classmethod\n def from_AST(cls, node, factory):\n \"\"\"Construct from an Enumerator node of form\n \n var in {<rel>.smlookup(<mask>, <key vars>)}\n \n \"\"\"\n checktype(node, L.Enumerator)\n \n var = L.get_name(node.target)\n sm = L.get_singletonset(node.iter)\n checktype(sm, L.SMLookup)\n rel = L.get_name(sm.target)\n mask = Mask(sm.mask)\n keyvars = L.get_vartuple(sm.key)\n # Ensure the mask is consistent with how it's used.\n if mask != Mask.from_keylen(len(keyvars)):\n raise TypeError\n \n lhs = keyvars + (var,)\n return cls(lhs, rel)\n \n def to_AST(self):\n mask = Mask.from_keylen(len(self.lhs) - 1)\n keyvars = self.lhs[:-1]\n var = self.lhs[-1]\n sm = L.SMLookup(L.ln(self.rel), mask.make_node().s,\n L.tuplify(keyvars), None)\n return L.Enumerator(L.sn(var), L.Set((sm,)))\n \n def rewrite_subst(self, subst, factory):\n # The normal rewriting won't get the smlookup keys.\n new_lhs = apply_subst_tuple(self.lhs, subst)\n return self._replace(lhs=new_lhs)\n \n def rate(self, bindenv):\n mask = Mask.from_vars(self.lhs, bindenv)\n if mask.is_keymask:\n return Rate.CONSTANT\n return super().rate(bindenv)\n\n\nclass SingletonClause(Clause, ABCStruct):\n \n \"\"\"An enumerator over a singleton set, i.e., that binds its\n left-hand side to a single value.\n \"\"\"\n \n kind = Clause.KIND_ENUM\n \n robust = False\n \n lhs = TypedField(str, seq=True)\n \"\"\"Enumeration variables.\"\"\"\n val = TypedField(L.expr)\n \"\"\"Expression computing value of singleton element.\"\"\"\n \n @classmethod\n def from_expr(cls, node):\n \"\"\"Construct from a condition expression of form\n \n <vars> == <rel>\n \"\"\"\n checktype(node, L.AST)\n \n left, op, val = L.get_cmp(node)\n checktype(op, L.Eq)\n lhs = L.get_vartuple(left)\n \n return cls(lhs, val)\n \n @classmethod\n def from_AST(cls, node, factory):\n \"\"\"Construct from Enumerator node of form\n \n <vars> in {<expr>}\n \"\"\"\n checktype(node, L.Enumerator)\n \n lhs = L.get_vartuple(node.target)\n val = L.get_singletonset(node.iter)\n \n return cls(lhs, val)\n \n def __init__(self, lhs, val):\n self.enumlhs = self.lhs\n self.enumrel = None\n self.vars = self.enumvars\n \n def to_AST(self):\n return L.Enumerator(L.tuplify(self.lhs, lval=True),\n L.Set((self.val,)))\n \n def rate(self, bindenv):\n return Rate.CONSTANT\n \n def 
get_code(self, bindenv, body):\n mask = Mask.from_vars(self.lhs, bindenv)\n bvars, uvars, _eqs = mask.split_vars(self.lhs)\n return make_tuplematch(self.val, mask, bvars, uvars, body)\n\n\nclass DeltaClause(Clause, ABCStruct):\n \n \"\"\"Clause for the update to a join.\"\"\"\n \n kind = Clause.KIND_ENUM\n \n isdelta = True\n robust = False\n \n lhs = TypedField(str, seq=True)\n \"\"\"Enumeration variables.\"\"\"\n rel = TypedField(str)\n \"\"\"Relation that was updated.\"\"\"\n val = TypedField(L.expr)\n \"\"\"Expression computing value of singleton element.\"\"\"\n limit = TypedField(int)\n \n @classmethod\n def from_AST(cls, node, factory):\n \"\"\"Construct from Enumerator node of form\n \n <vars> in deltamatch(<rel>, <mask>, <val>, <limit>)\n \"\"\"\n checktype(node, L.Enumerator)\n \n lhs = L.get_vartuple(node.target)\n checktype(node.iter, L.DeltaMatch)\n rel = L.get_name(node.iter.target)\n mask = Mask(node.iter.mask)\n val = node.iter.elem\n limit = node.iter.limit\n if limit not in [0, 1]:\n raise TypeError\n \n inferred_mask = Mask.from_vars(lhs, lhs)\n assert mask == inferred_mask\n \n return cls(lhs, rel, val, limit)\n \n def __init__(self, lhs, rel, val, limit):\n assert limit in [0, 1]\n self.enumlhs = self.lhs\n self.enumrel = rel\n self.vars = self.enumvars\n \n def to_AST(self):\n mask = Mask.from_vars(self.lhs, self.lhs)\n return L.Enumerator(L.tuplify(self.lhs, lval=True),\n L.DeltaMatch(L.ln(self.rel), mask.make_node().s,\n self.val, self.limit))\n \n def rewrite_rel(self, rel, factory):\n # In the case of delta clauses over pair relations,\n # this allows the use of the pair relation to be filtered.\n return self._replace(rel=rel)\n \n def needs_filtering(self, bindenv):\n return False\n \n def rate(self, bindenv):\n return Rate.FIRST\n \n def get_code(self, bindenv, body):\n deltamask = Mask.from_vars(self.lhs, self.lhs)\n mask = Mask.from_vars(self.lhs, bindenv)\n bvars, uvars, _eqs = mask.split_vars(self.lhs)\n if mask.has_wildcards:\n # Can this be streamlined into something more readable,\n # like expressing the deltamatch as an If-guard? 
\n val = L.DeltaMatch(L.ln(self.rel), deltamask.make_node().s,\n self.val, self.limit)\n return L.pc('''\n for UVARS in setmatch(VAL, MASK, BVARS):\n BODY\n ''', subst={'VAL': val,\n 'MASK': mask.make_node(),\n 'BVARS': L.tuplify(bvars),\n 'UVARS': L.tuplify(uvars, lval=True),\n '<c>BODY': body})\n else:\n return make_tuplematch(self.val, mask, bvars, uvars, body)\n\n\nclass CondClause(Clause, ABCStruct):\n \n \"\"\"A condition expression clause.\"\"\"\n \n kind = Clause.KIND_COND\n \n cond = TypedField(L.expr)\n \"\"\"Condition expression.\"\"\"\n \n @classmethod\n def from_AST(cls, node, factory):\n \"\"\"Construct from expression node.\"\"\"\n checktype(node, L.expr)\n \n return cls(node)\n \n def __init__(self, cond):\n self.vars = tuple(L.VarsFinder.run(cond, ignore_functions=True))\n \n if L.is_vareqcmp(cond):\n self.eqvars = L.get_vareqcmp(cond)\n else:\n self.eqvars = None\n \n def to_AST(self):\n return self.cond\n \n def fits_string(self, bindenv, s):\n return self.cond == L.pe(s)\n \n def rate(self, bindenv):\n if set(self.vars).issubset(bindenv):\n return Rate.CONSTANT\n else:\n return Rate.UNRUNNABLE\n \n def get_code(self, bindenv, body):\n assert set(self.vars).issubset(bindenv)\n code = L.pc('''\n if COND:\n BODY\n ''', subst={'COND': self.cond, '<c>BODY': body})\n return code\n\n\nclass ClauseFactory:\n \n \"\"\"Factory for constructing clauses from ASTs.\n Instantiation not needed.\n \"\"\"\n \n typecheck = True\n \"\"\"Whether or not to use clauses that insert type checks\n in generated code.\n \"\"\"\n \n @classmethod\n def get_clause_kinds(cls):\n \"\"\"Clause classes to use to try to construct a clause from\n an AST. Subclasses should override this to prepend their own\n clause classes to the list.\n \"\"\"\n return [\n EnumClause,\n SubClause,\n AugClause,\n # Try LookupClause before SingletonClause since\n # the former's more specific.\n LookupClause,\n SingletonClause,\n DeltaClause,\n CondClause,\n ]\n \n @classmethod\n def from_AST(cls, node):\n # Try each clause until one doesn't raise TypeError.\n for enumcls in cls.get_clause_kinds():\n try:\n return enumcls.from_AST(node, cls)\n except TypeError:\n pass\n else:\n raise TypeError('Cannot construct clause from node: ' +\n L.ts(node))\n \n @classmethod\n def rewrite_subst(cls, cl, subst):\n \"\"\"Rewrite a clause to substitute variables in conditions\n and on the LHS of enumerators, according to the given mapping.\n The RHS of enumerators is (usually) unaffected. The substitution\n mapping is as for incast.VarRenamer. Also applies to conditions.\n \"\"\"\n return cl.rewrite_subst(subst, cls)\n \n @classmethod\n def rewrite_lhs(cls, cl, subst):\n \"\"\"As above but strictly only apply to the LHS of an enumerator.\"\"\"\n return cl.rewrite_lhs(subst, cls)\n \n @classmethod\n def rewrite_rel(cls, cl, rel):\n \"\"\"For an enumerator over a relation (i.e. non-None enumrel),\n produce a clause with rel as the new iterated relation.\n For all other clauses, raise TypeError.\n \"\"\"\n return cl.rewrite_rel(rel, cls)\n \n # Note that subtract/augment should produce the opposite kind\n # of clause for negated enums.\n \n @classmethod\n def bind(cls, cl, val, *, augmented):\n \"\"\"For an enumerator, produce a clause where the LHS is\n bound directly to val. 
val need not be a value in the RHS.\n For conditions, raise TypeError.\n \"\"\"\n limit = 0 if augmented else 1\n return DeltaClause(cl.enumlhs, cl.enumrel, val, limit)\n \n @classmethod\n def subtract(cls, cl, excl):\n \"\"\"For an enumerator, produce a subtractive clause that\n excludes the given element. For conditions, raise TypeError.\n \"\"\"\n if cl.kind is not cl.KIND_ENUM:\n raise TypeError\n return SubClause(cl, excl)\n \n @classmethod\n def subtract_inner(cls, cl, excl):\n \"\"\"As above, but apply the subtraction to the innermost clause,\n so an outer augmented clause can still undo it.\n \"\"\"\n if cl.kind is not cl.KIND_ENUM:\n raise TypeError\n return cl.subtract_inner(excl, cls)\n \n @classmethod\n def augment(cls, cl, extra):\n \"\"\"For an enumerator, produce an augmented clause that\n includes the given element. For conditions, raise TypeError.\n \"\"\"\n if cl.kind is not cl.KIND_ENUM:\n raise TypeError\n return AugClause(cl, extra)\n \n @classmethod\n def membercond_to_enum(cls, cl):\n \"\"\"For a condition clause that expresses a membership, return\n an equivalent enumerator clause. For other kinds of conditions,\n return the same clause. For enumerators, raise TypeError.\n \"\"\"\n if cl.kind is not Clause.KIND_COND:\n raise TypeError\n \n compre_ast = None\n clast = cl.to_AST()\n if L.is_cmp(clast):\n lhs, op, rhs = L.get_cmp(clast)\n if (L.is_vartuple(lhs) and\n isinstance(op, L.In)):\n compre_ast = L.Enumerator(\n L.tuplify(L.get_vartuple(lhs), lval=True),\n rhs)\n \n if compre_ast is None:\n return cl\n else:\n return cls.from_AST(compre_ast)\n \n @classmethod\n def enum_to_membercond(cls, cl):\n \"\"\"For enumerators, return an equivalent membership clause.\n For conditions, raise TypeError.\n \"\"\"\n if cl.kind is not Clause.KIND_ENUM:\n raise TypeError\n \n clast = cl.to_AST()\n lhs = clast.target\n lhs = L.ContextSetter.run(lhs, L.Load)\n rhs = clast.iter\n cond_ast = L.cmp(lhs, L.In(), rhs)\n return cls.from_AST(cond_ast)\n" }, { "alpha_fraction": 0.47519582509994507, "alphanum_fraction": 0.5143603086471558, "avg_line_length": 20.27777862548828, "blob_id": "c67dae694c44baa2b025bda83c103549e95e0dc4", "content_id": "32400499a3afa449891b8234ef7213271f118b05", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 383, "license_type": "no_license", "max_line_length": 66, "num_lines": 18, "path": "/incoq/tests/programs/deminc/wildcard_in.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "# Make sure demand filtering for clauses with wildcards works.\n\nfrom incoq.runtime import *\n\nQUERYOPTIONS(\n '{b for (a2, b) in R for (b2, _) in R if a == a2 if b == b2}',\n impl = 'dem',\n uset_force = True,\n)\n\nR = Set()\n\nfor x, y in [(1, 2), (2, 3), (3, 4)]:\n R.add((x, y))\n\na = 1\nprint(sorted({b for (a2, b) in R for (b2, _) in R\n if a == a2 if b == b2}))\n" }, { "alpha_fraction": 0.5095694065093994, "alphanum_fraction": 0.5239234566688538, "avg_line_length": 11.666666984558105, "blob_id": "b24408fd16d765cef2e00cdde50df00c6394d53f", "content_id": "4028acf591aaf2862f3aeb589ae256e55f930292", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 418, "license_type": "no_license", "max_line_length": 47, "num_lines": 33, "path": "/incoq/tests/programs/aggr/obj_in.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "# Aggregate of a variable in the object domain.\n\nfrom incoq.runtime import *\n\nOPTIONS(\n obj_domain = True,\n 
default_impl = 'inc',\n)\nQUERYOPTIONS(\n 'sum(s)',\n params = ['s'],\n impl = 'dem',\n)\nQUERYOPTIONS(\n 'sum(o.f)',\n params = ['o'],\n impl = 'dem',\n)\n\nr = Set()\nt = Set()\no = Obj()\no.f = t\n\nfor x in [1, 2, 3, 4, 5]:\n r.add(x)\n t.add(x)\n\nr.remove(5)\n\ns = r\nprint(sum(s))\nprint(sum(o.f))\n" }, { "alpha_fraction": 0.7936508059501648, "alphanum_fraction": 0.7936508059501648, "avg_line_length": 20, "blob_id": "fcf4bf27513823d871ef7148e255b97f7a755374", "content_id": "bf4c76bab576101cb454c7b9a9dfcd9f91b67c3c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 63, "license_type": "no_license", "max_line_length": 34, "num_lines": 3, "path": "/experiments/rbac/corerbac/coreRBAC_orig.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from .coreRBAC_in import *\n\nCheckAccess_nodemand = CheckAccess\n" }, { "alpha_fraction": 0.45150500535964966, "alphanum_fraction": 0.49832776188850403, "avg_line_length": 12.590909004211426, "blob_id": "aba8d541463a1b85d367cf2a24cede2aa64c3aed", "content_id": "92bf61f1543d7047cf1f30b575328886738cad2d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 299, "license_type": "no_license", "max_line_length": 45, "num_lines": 22, "path": "/incoq/tests/programs/auxmap/deadcode_in.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "# Dead-code elimination.\n\nfrom incoq.runtime import *\n\nOPTIONS(\n deadcode_keepvars = ['S'],\n)\n\nR = Set()\nS = Set()\nT = Set()\n\nfor x, y in [(1, 2), (1, 3), (2, 3), (1, 4)]:\n R.add((x, y))\n S.add((x, y))\n T.add((x, y))\n\nR.remove((1, 4))\nS.remove((1, 4))\nT.remove((1, 4))\n\nprint(sorted(R))\n" }, { "alpha_fraction": 0.5143769979476929, "alphanum_fraction": 0.5463258624076843, "avg_line_length": 16.38888931274414, "blob_id": "a5e1eeeaca136416f0a79f0bf9defeb9c1ce6b12", "content_id": "bc6e23d8daba1ee3c704dd1136a396fedef78959", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 313, "license_type": "no_license", "max_line_length": 58, "num_lines": 18, "path": "/incoq/tests/programs/comp/expr_in.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "# Result expressions that aren't just tuples of variables.\n\nfrom incoq.runtime import *\n\nQUERYOPTIONS(\n '{(f(x), y + 1, None) for (x, y) in S}',\n impl = 'inc',\n)\n\nS = Set()\n\ndef f(y):\n return True\n\nfor v1, v2 in [(1, 2), (3, 4)]:\n S.add((v1, v2))\n\nprint(sorted({(f(x), y + 1, None) for (x, y) in S}))\n" }, { "alpha_fraction": 0.5889361500740051, "alphanum_fraction": 0.6178723573684692, "avg_line_length": 29.921052932739258, "blob_id": "4bbf7c3ce633f86fbd4402b200b7e3a3118bead3", "content_id": "6d64c1e87d2b89c2e1533f3291e369c9b6a62656", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1175, "license_type": "no_license", "max_line_length": 95, "num_lines": 38, "path": "/incoq/tests/programs/deminc/aug2_in.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "# Another test of augmented self-join code with demand.\n# Demand invariants should be maintained in a correct order\n# for demand propagation. 
When turning normal clauses into\n# filtered ones, augmented clauses should become non-augmented\n# and non-augmented should become subtractive.\n\nfrom incoq.runtime import *\n\nOPTIONS(\n selfjoin_strat = 'aug',\n)\n\nQUERYOPTIONS(\n '{z for (x, x2) in E for (x3, y) in E for (y2, z) in S if x == x2 if x2 == x3 if y == y2}',\n impl = 'dem',\n uset_force = True,\n)\n\nE = Set()\nS = Set()\n\n# Query once to demand it so the below addition updates cause maintenance.\nprint(sorted({z for (x, x2) in E for (x3, y) in E for (y2, z) in S\n if x == x2 if x2 == x3 if y == y2}))\n\n# Note that we're adding to S first so E will double join with\n# it if done incorrectly.\nS.add((1, 2))\nE.add((1, 1))\n\nprint(sorted({z for (x, x2) in E for (x3, y) in E for (y2, z) in S\n if x == x2 if x2 == x3 if y == y2}))\n\nS.remove((1, 2))\n\n# If we screwed up our reference counts, we won't get the right answer.\nprint(sorted({z for (x, x2) in E for (x3, y) in E for (y2, z) in S\n if x == x2 if x2 == x3 if y == y2}))\n" }, { "alpha_fraction": 0.5787419676780701, "alphanum_fraction": 0.5787419676780701, "avg_line_length": 30.114286422729492, "blob_id": "12e384c1c43c5d24de925f7803c5d324bde279a3", "content_id": "86a6204ea312c7fc4a4fe7cae8f1d8d2368ce492", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4356, "license_type": "no_license", "max_line_length": 75, "num_lines": 140, "path": "/incoq/compiler/demand/demclause.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Demand-related clauses.\"\"\"\n\n\n__all__ = [\n 'DemClause',\n 'DemClauseFactory_Mixin',\n]\n\n\nfrom simplestruct import TypedField\nfrom simplestruct.type import checktype\n\nimport incoq.compiler.incast as L\nfrom incoq.compiler.set import Mask\nfrom incoq.compiler.comp import Rate, Clause\nfrom incoq.compiler.comp.clause import (ABCStruct, ClauseFactory,\n apply_subst_tuple)\n\n\nclass DemClause(Clause, ABCStruct):\n \n \"\"\"An enumerator clause who's RHS is the result of a query with\n demand.\n \"\"\"\n \n kind = Clause.KIND_ENUM\n \n inc_safe = False\n \n cl = TypedField(Clause)\n \"\"\"Underlying clause.\"\"\"\n demname = TypedField(str)\n \"\"\"Demand name.\"\"\"\n demparams = TypedField(str, seq=True)\n \"\"\"Demand parameters.\"\"\"\n \n @property\n def pat_mask(self):\n return self.cl.pat_mask\n \n @property\n def con_mask(self):\n assert len(self.enumlhs) == len(self.cl.con_mask)\n # The underlying mask position must be True and the\n # corresponding var can't be a demparam.\n return tuple(b and v not in self.demparams\n for v, b in zip(self.enumlhs, self.cl.con_mask))\n \n @property\n def tagsin_mask(self):\n return (v in self.demparams\n for v in self.enumlhs) \n \n @property\n def tagsout_mask(self):\n return (v not in self.demparams\n for v in self.enumlhs)\n \n @classmethod\n def from_AST(cls, node, factory):\n \"\"\"Construct from Enumerator node of form\n \n <vars> in DemQuery(...)\n \"\"\"\n checktype(node, L.Enumerator)\n \n if not isinstance(node.iter, L.DemQuery):\n raise TypeError\n if not all(isinstance(a, L.Name) for a in node.iter.args):\n raise TypeError\n demname = node.iter.demname\n demparams = tuple(a.id for a in node.iter.args)\n rhs = node.iter.value\n \n innernode = node._replace(iter=rhs)\n innerclause = factory.from_AST(innernode)\n \n return cls(innerclause, demname, demparams)\n \n def __init__(self, cl, demname, demparams):\n for attr in [\n 'isdelta', 'enumlhs', 'enumrel',\n 'vars', 'eqvars', 'robust']:\n setattr(self, attr, 
getattr(cl, attr))\n \n def to_AST(self):\n code = self.cl.to_AST()\n assert isinstance(code, L.Enumerator)\n code = code._replace(\n iter=L.DemQuery(self.demname,\n tuple(L.ln(p) for p in self.demparams),\n code.iter))\n return code\n \n def rewrite_subst(self, subst, factory):\n new_cl = self.cl.rewrite_subst(subst, factory)\n new_demparams = apply_subst_tuple(self.demparams, subst)\n return self._replace(cl=new_cl, demparams=new_demparams)\n \n def rewrite_rel(self, rel, factory):\n new_cl = self.cl.rewrite_rel(rel, factory)\n return self._replace(cl=new_cl)\n \n def subtract_inner(self, excl, factory):\n new_cl = self.cl.subtract_inner(excl, factory)\n return self._replace(cl=new_cl)\n \n def fits_string(self, mask, s):\n return self.cl.fits_string(mask, s)\n \n def rate(self, bindenv):\n # Require demand parameters to be bound.\n mask = Mask.from_vars(self.enumlhs, bindenv)\n bounds, _unbounds, _eqs = mask.split_vars(self.cl.lhs)\n \n if not set(bounds).issuperset(set(self.demparams)):\n return Rate.UNRUNNABLE\n \n return self.cl.rate(bindenv)\n \n def get_code(self, bindenv, body):\n # Just stick a DemQuery node in before the regular code.\n # TODO: This is a little ugly in that it results in\n # littering the code with \"None\"s. Maybe make a special\n # case in the translation of DemQuery to avoid this.\n code = self.cl.get_code(bindenv, body)\n new_node = L.Expr(value=L.DemQuery(\n self.demname,\n tuple(L.ln(p) for p in self.demparams),\n None))\n code = (new_node,) + code\n return code\n\n\nclass DemClauseFactory_Mixin(ClauseFactory):\n \n @classmethod\n def get_clause_kinds(cls):\n dem_clauses = [DemClause]\n return dem_clauses + super().get_clause_kinds()\n" }, { "alpha_fraction": 0.5438888669013977, "alphanum_fraction": 0.5498431324958801, "avg_line_length": 27.977737426757812, "blob_id": "0b48e5ed351b59430d2a5700b4fcbb22d7f73df4", "content_id": "403b58f30ee3c6fd0ea3b8d63204786bb399f545", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15619, "license_type": "no_license", "max_line_length": 89, "num_lines": 539, "path": "/main.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Invoke the transformation system.\"\"\"\n\n\nfrom incoq.transform import *\n\n\nSTATS_DIR = 'stats/'\nSTATS_FILE = STATS_DIR + 'transstats.pickle'\n\n\nall_tasks = []\n\ndef add_task(task):\n all_tasks.append(task)\n\ndef add_impls(display_name, base_name, templates):\n add_task(make_in_task(display_name, base_name))\n for template in templates:\n in_name = base_name + '_in.py'\n out_name = base_name + '.py'\n task = Task(display_name, in_name, out_name, {}, {})\n add_task(task_from_template(task, template))\n\n\n# ---- Program-specific tasks ----\n\nclass INC_SUBDEM_LAMUTEX(INC_SUBDEM):\n _inherit_fields = True\n \n msgset_t = '''set(tuple([top, top, tuple([\n enum('msglabel', str),\n subtype('clocks', number),\n subtype('procs', number)])]))'''\n \n extra_nopts = {\n 'var_types': {\n '_PReceivedEvent_0': msgset_t,\n '_PReceivedEvent_1': msgset_t,\n '_PReceivedEvent_2': msgset_t,\n 'SELF_ID': \"subtype('procs', number)\",\n 'P_mutex_c': \"subtype('clocks', number)\",\n 'P_s': \"set(subtype('procs', number))\",\n },\n 'default_uset_lru': 1,\n }\n\nclass INC_SUBDEM_LAMUTEX_ORIG(INC_SUBDEM):\n _inherit_fields = True\n \n msgset_t = '''set(tuple([top, top, tuple([\n enum('msglabel', str),\n subtype('clocks', number),\n subtype('procs', number)])]))'''\n \n extra_nopts = {\n 'var_types': {\n '_PReceivedEvent_0': msgset_t,\n 
'SELF_ID': \"subtype('procs', number)\",\n 'P_mutex_c': \"subtype('clocks', number)\",\n 'P_s': \"set(subtype('procs', number))\",\n 'P_q': '''set(tuple([enum('msglabel', str),\n subtype('clocks', number),\n subtype('procs', number)]))''',\n },\n 'default_uset_lru': 1,\n }\n\nclass DEM_OBJ_NS_RATOKEN(DEM_OBJ_NS):\n _inherit_fields = True\n \n msgset1_t = '''set(tuple([top, top, tuple([\n enum('msglabel', str),\n subtype('clocks', number),\n subtype('procs', number)])]))'''\n msgset2_t = '''set(tuple([top, top, tuple([\n enum('msglabel', str),\n top])]))'''\n msgset3_t = '''set(tuple([top, top, tuple([\n enum('msglabel', str),\n dict(top, top)])]))'''\n \n extra_nopts = {\n 'var_types': {\n '_PReceivedEvent_2': msgset1_t,\n '_PSentEvent_3': msgset2_t,\n '_PReceivedEvent_4': msgset3_t,\n '_PSentEvent_5': msgset3_t,\n 'SELF_ID': \"subtype('procs', number)\",\n 'P_ps': \"set(subtype('procs', number))\",\n 'P_token': \"dict(top, top)\",\n },\n }\n\nCHECKACCESS_STR = '{r for r in ROLES if (session,r) in SR if (operation,object,r) in PR}'\nASSIGNEDROLES_STR = '{r for r in ROLES if (user,r) in UR}'\nDELETESESSION_STR = '{(session,r) for r in ROLES if (session, r) in SR}'\nclass INC_CORERBAC_CA(COM):\n _inherit_fields = True\n \n output_suffix = 'checkaccess_inc'\n display_suffix = 'Unfiltered (CA)'\n \n extra_qopts = {\n CHECKACCESS_STR: {'impl': 'inc'},\n ASSIGNEDROLES_STR: {'impl': 'inc'},\n DELETESESSION_STR: {'impl': 'inc'},\n }\n\nclass DEM_CORERBAC_CA(COM):\n _inherit_fields = True\n \n output_suffix = 'checkaccess_dem'\n display_suffix = 'Filtered (CA)'\n \n extra_qopts = {\n CHECKACCESS_STR: {'impl': 'dem',\n 'uset_mode': 'explicit',\n 'uset_params': ('object',),\n 'demand_reorder': [0, 3, 1, 2]},\n ASSIGNEDROLES_STR: {'impl': 'inc'},\n DELETESESSION_STR: {'impl': 'inc'},\n }\n\n\n# ---- Uncomment to rebuild experiment programs. ---\n\n#add_impls('Social', 'experiments/twitter/twitter', [\n# INC,\n# DEM,\n# DEM_SINGLE_TAG,\n# DEM_NORCELIM,\n# DEM_NOTYPECHECK,\n# DEM_INLINE,\n#])\n#\n#add_impls('Auth', 'experiments/django/django', [\n# INC,\n# DEM,\n#])\n#add_impls('Simplified Auth', 'experiments/django/django_simp', [\n# INC,\n# DEM,\n#])\n#\n#add_impls('Wifi', 'experiments/wifi/wifi', [\n# INC,\n# DEM,\n#])\n#\n#for level in [\n# '1',\n# '2',\n# '3'\n# ]:\n# add_impls('JQL {}'.format(level), 'experiments/jql/jql_{}'.format(level), [\n## AUX,\n# INC,\n# DEM,\n## DEM_NO_TAG_CHECK,\n# ])\n#\n#add_impls('Constr. 
RBAC', 'experiments/rbac/constrainedrbac/crbac', [\n# AUX,\n# INC,\n# DEM,\n#])\n#\n#add_impls('CoreRBAC', 'experiments/rbac/corerbac/coreRBAC', [\n# INC_CORERBAC_CA,\n# DEM_CORERBAC_CA,\n# INC,\n# DEM,\n#])\n#\n#add_impls('bday', 'experiments/other/bday/bday', [\n# INC,\n#])\n#\n#add_impls('clpaxos', 'experiments/distalgo/clpaxos/clpaxos_inc', [\n# INC_SUBDEM,\n# DEM,\n#])\n#add_impls('crleader', 'experiments/distalgo/crleader/crleader_inc', [\n# INC_SUBDEM,\n# DEM,\n#])\n#add_impls('dscrash', 'experiments/distalgo/dscrash/dscrash_inc', [\n# DEM_OBJ_NS,\n#])\n#add_impls('hsleader', 'experiments/distalgo/hsleader/hsleader_inc', [\n# INC_SUBDEM,\n# DEM,\n#])\n#add_impls('lamutex', 'experiments/distalgo/lamutex/lamutex_inc', [\n# INC_SUBDEM_LAMUTEX,\n# DEM_LRU,\n#])\n#add_impls('lamutex opt1', 'experiments/distalgo/lamutex/lamutex_opt1_inc', [\n# INC_SUBDEM_LAMUTEX,\n# DEM,\n#])\n#add_impls('lamutex opt2', 'experiments/distalgo/lamutex/lamutex_opt2_inc', [\n# INC_SUBDEM_LAMUTEX,\n# DEM_LRU,\n#])\n#add_impls('lamutex orig', 'experiments/distalgo/lamutex/lamutex_orig_inc', [\n# INC_SUBDEM_LAMUTEX_ORIG,\n# DEM_LRU,\n#])\n#add_impls('lapaxos', 'experiments/distalgo/lapaxos/lapaxos_inc', [\n# INC_SUBDEM,\n# DEM,\n#])\n#add_impls('ramutex', 'experiments/distalgo/ramutex/ramutex_inc', [\n# INC_SUBDEM,\n# DEM,\n#])\n#add_impls('ratoken', 'experiments/distalgo/ratoken/ratoken_inc', [\n# DEM_OBJ_NS_RATOKEN,\n#])\n#add_impls('sktoken', 'experiments/distalgo/sktoken/sktoken_inc', [\n# DEM_OBJ_NS,\n#])\n#add_impls('2pcommit', 'experiments/distalgo/tpcommit/tpcommit_inc', [\n# INC_SUBDEM,\n# DEM,\n#])\n#add_impls('vrpaxos', 'experiments/distalgo/vrpaxos/vrpaxos_inc', [\n# DEM,\n#])\n\n\n# ---- Uncomment to rebuild test programs. ----\n\ntest_programs = [\n# 'auxmap/basic',\n# 'auxmap/deadcode',\n# 'auxmap/degenerate',\n# 'auxmap/equality',\n# 'auxmap/inline',\n# 'auxmap/wildcard',\n#\n# 'comp/basic',\n# 'comp/deltaeq',\n# 'comp/deltawild',\n# 'comp/deltawildeq',\n# 'comp/expr',\n# 'comp/implmode',\n# 'comp/inline',\n# 'comp/nonpattern',\n# 'comp/parameter',\n# 'comp/inconlyonce',\n# 'comp/pattern',\n# 'comp/patternmaint',\n# 'comp/setmatchcomp',\n# 'comp/sjaug',\n# 'comp/sjsub',\n# 'comp/uset/uset',\n# 'comp/uset/uset_explicit',\n# 'comp/uset/auto',\n# 'comp/uset/nodemand',\n# 'comp/uset/lru',\n# 'comp/nested/basic',\n# 'comp/nested/obj',\n# 'comp/nested/outline',\n# 'comp/nested/param',\n# 'comp/tup/flatten',\n# 'comp/macroupdate',\n# 'comp/unhandled',\n# 'comp/types',\n#\n# 'objcomp/batch',\n# 'objcomp/auxonly',\n# 'objcomp/expr',\n# 'objcomp/inc',\n# 'objcomp/if',\n# 'objcomp/pairmode',\n# 'objcomp/notc',\n# 'objcomp/map',\n# 'objcomp/inputrel',\n# 'objcomp/autoflatten',\n# \n# 'deminc/aug1',\n# 'deminc/aug2',\n# 'deminc/basic',\n# 'deminc/nested',\n# 'deminc/nested_subdem',\n# 'deminc/nocheck',\n# 'deminc/nodas',\n# 'deminc/obj',\n# 'deminc/objwild',\n# 'deminc/wildcard',\n# 'deminc/reorder',\n# 'deminc/tup/basic',\n# 'deminc/tup/inc',\n# 'deminc/tup/obj',\n# 'deminc/tup/objnest',\n#\n# 'aggr/basic',\n# 'aggr/comp',\n# 'aggr/inline',\n# 'aggr/minmax',\n# 'aggr/obj',\n# 'aggr/params',\n# 'aggr/rewrite',\n# 'aggr/tuple',\n# 'aggr/uset',\n# 'aggr/lru',\n# 'aggr/nested/basic',\n# 'aggr/nested/aggrdem',\n# 'aggr/nested/compdem',\n# 'aggr/nested/halfdemand',\n# 'aggr/nested/obj',\n]\n\nfor name in test_programs:\n add_task(make_testprogram_task(name))\n\n\nelapsed = do_tasks(all_tasks, STATS_FILE)\n\nprint('Done ({:.3f} s)'.format(elapsed))\n\nfrom incoq.transform import StatsDB, Session, 
StandardSchema\n\nclass RunningExSchema(StatkeySchema):\n \n cols = [\n ('lines', 'LOC', None),\n ('trans time', 'Time', '.2f'),\n ]\n \n rows = [\n ('Social Input', 'Running ex Input'),\n ('Social Unfiltered', 'Running ex Incremental'),\n ('Social Filtered', 'Running ex Filtered'),\n ('Social Filtered (no type checks)',\n 'Running ex Filtered (no type checks)'),\n ('Social Filtered (no rc elim.)',\n 'Running ex Filtered (no rc elim.)'),\n ('Social Filtered (inlined)',\n 'Running ex Filtered (inlined)'),\n ('Social Filtered (single tag)', 'Running ex Filtered (osq strat)'),\n ]\n\nclass ComparisonSchema(OrigIncFilterSchema):\n \n def _rowgen(name):\n return ([name + ' Input', name + ' Unfiltered', name + ' Filtered'],\n name)\n \n rows = [\n _rowgen('Wifi'),\n _rowgen('Auth'),\n _rowgen('Simplified Auth'),\n _rowgen('JQL 1'),\n _rowgen('JQL 2'),\n _rowgen('JQL 3'),\n ]\n\nclass ApplicationsSchema(OrigIncFilterSchema):\n \n def _rowgen(name, dispname=None):\n if dispname is None:\n dispname = name\n return ([name + ' Input', name + ' Unfiltered', name + ' Filtered'],\n dispname)\n \n def _rowgen2(name):\n return ([name + ' Input', name + ' Unfiltered (obj)',\n name + ' Filtered (obj)'],\n name)\n \n rows = [\n (['CoreRBAC Input', 'CoreRBAC Unfiltered (CA)',\n 'CoreRBAC Filtered (CA)'],\n 'CheckAccess'),\n _rowgen('CoreRBAC'),\n _rowgen('Constr. RBAC', 'SSD'),\n (['lamutex orig Input', 'lamutex orig Unfiltered',\n 'lamutex orig Filtered'], 'lamutex_orig'),\n (['lamutex Input', 'lamutex Unfiltered', 'lamutex Filtered'],\n 'lamutex_spec'),\n (['lamutex opt2 Input', 'lamutex opt2 Unfiltered',\n 'lamutex opt2 Filtered'],\n 'lamutex_specsimp'),\n _rowgen('2pcommit'),\n _rowgen('clpaxos'),\n _rowgen('crleader'),\n _rowgen2('dscrash'),\n _rowgen('hsleader'),\n# _rowgen('lapaxos'),\n _rowgen('ramutex'),\n _rowgen2('ratoken'),\n# _rowgen2('sktoken'),\n ]\n\nclass DistalgoSchema(OrigIncFilterSchema):\n \n def _rowgen(name):\n return ([name + ' Input', name + ' Unfiltered', name + ' Filtered'],\n name)\n \n def _rowgen2(name):\n return ([name + ' Input', name + ' Unfiltered (obj)',\n name + ' Filtered (obj)'],\n name)\n \n rows = [\n _rowgen('2pcommit'),\n _rowgen('clpaxos'),\n _rowgen('crleader'),\n _rowgen2('dscrash'),\n _rowgen('hsleader'),\n _rowgen('lamutex'),\n _rowgen('lamutex opt1'),\n _rowgen('lamutex opt2'),\n _rowgen('lamutex orig'),\n# _rowgen('lapaxos'),\n _rowgen('ramutex'),\n _rowgen2('ratoken'),\n# _rowgen2('sktoken'),\n ]\n\nclass RunningExCostSchema(CostSchema):\n rows = [\n ('Social Unfiltered', 'incremental'),\n ('Social Filtered', 'filtered'),\n ]\n cols = [\n ('make_user', 'make_user', None),\n ('make_group', 'make_group', None),\n ('follow', 'follow', None),\n ('unfollow', 'unfollow', None),\n ('join_group', 'join_group', None),\n ('leave_group', 'leave_group', None),\n ('change_loc', 'change_loc', None),\n ('do_query', 'do_query', None),\n ]\n\nclass LamutexspecCostSchema(CostSchema):\n rows = [\n ('lamutex Unfiltered', 'lamutex'),\n# ('lamutex opt1 Unfiltered', 'lamutex opt1'),\n ('lamutex opt2 Unfiltered', 'lamutex optimized'),\n ]\n cols = [\n ('Query_0', 'Query', None),\n ('Update__PReceivedEvent_0', 'Rec Request', None),\n ('Update__PReceivedEvent_1', 'Rec Release', None),\n ('Update__PReceivedEvent_2', 'Rec Ack', None),\n ]\nclass LamutexorigCostSchema(CostSchema):\n rows = [\n ('lamutex orig Unfiltered', 'lamutex orig'),\n ]\n cols = [\n ('Query_0', 'Query 1', None),\n ('Query_1', 'Query 2', None),\n ('Query_2', 'Query 3', None),\n ('Update_P_q3', 'Update request queue', 
None),\n ('Update_P_q4', 'Update request queue', None),\n ('Update_P_q5', 'Update request queue', None),\n ('Update_P_q6', 'Update request queue', None),\n ('Update__PReceivedEvent_0', 'Rec Ack', None),\n ]\n\nclass OOPSLA15Schema(OrigIncFilterSchema):\n \n # (Not a method.)\n def _rowgen(dispname, name):\n return ([name + ' Input', name + ' Unfiltered', name + ' Filtered'],\n dispname)\n \n def _rowgen2(dispname, name):\n return ([name + ' Input', name + ' Unfiltered (obj)',\n name + ' Filtered (obj)'],\n dispname)\n \n rows = [\n _rowgen('Running', 'Social'),\n _rowgen('JQLbench1', 'JQL 1'),\n _rowgen('JQLbench2', 'JQL 2'),\n _rowgen('JQLbench3', 'JQL 3'),\n _rowgen('Wifi', 'Wifi'),\n _rowgen('Auth', 'Auth'),\n (['CoreRBAC Input', 'CoreRBAC Unfiltered (CA)',\n 'CoreRBAC Filtered (CA)'],\n 'Access'),\n _rowgen('CoreRBAC', 'CoreRBAC'),\n _rowgen('SSD', 'Constr. RBAC'),\n \n (['lamutex orig Input', 'lamutex orig Unfiltered',\n 'lamutex orig Filtered'],\n 'La mutex'),\n _rowgen('RA mutex', 'ramutex'),\n _rowgen2('RA token', 'ratoken'),\n# _rowgen2('SK token', 'sktoken'),\n _rowgen('CR leader', 'crleader'),\n _rowgen('HS leader', 'hsleader'),\n _rowgen('2P commit', '2pcommit'),\n _rowgen2('DS crash', 'dscrash'),\n _rowgen('CL Paxos', 'clpaxos'),\n ]\n\nstats = StatsDB(STATS_FILE)\nrunningex_schema = RunningExSchema(stats.allstats)\ncomparison_schema = ComparisonSchema(stats.allstats)\napplications_schema = ApplicationsSchema(stats.allstats)\ndistalgo_schema = DistalgoSchema(stats.allstats)\nrunningex_costschema = RunningExCostSchema(stats.allstats)\nlamutexspec_costschema = LamutexspecCostSchema(stats.allstats)\nlamutexorig_costschema = LamutexorigCostSchema(stats.allstats)\noopsla15_schema = OOPSLA15Schema(stats.allstats)\n\nrunningex_schema.save_csv(STATS_DIR + 'stats-runningex.csv')\ncomparison_schema.save_csv(STATS_DIR + 'stats-comparison.csv')\napplications_schema.save_csv(STATS_DIR + 'stats-applications.csv')\ndistalgo_schema.save_csv(STATS_DIR + 'stats-distalgo.csv')\noopsla15_schema.save_csv(STATS_DIR + 'stats-oopsla15.csv')\nrunningex_costschema.save_csv(STATS_DIR + 'stats-runninex_cost.csv')\nlamutexspec_costschema.save_csv(STATS_DIR + 'stats-lamutexspec_cost.csv')\nlamutexorig_costschema.save_csv(STATS_DIR + 'stats-lamutexorig_cost.csv')\n\n#print(runningex_schema.to_ascii())\n#print(comparison_schema.to_ascii())\n#print(applications_schema.to_ascii())\n#print(distalgo_schema.to_ascii())\n#print(oopsla15_schema.to_ascii())\n\n#print(runningex_costschema.to_ascii())\n#print(lamutexspec_costschema.to_ascii())\n#print(lamutexorig_costschema.to_ascii())\n\n#session = Session(stats)\n#Session.interact(stats, name='Social Unfiltered')\n#session = Session(stats, name='lamutex Unfiltered')\n#session.cmd_showcosts()\n#session.interact()\n" }, { "alpha_fraction": 0.625, "alphanum_fraction": 0.6253350973129272, "avg_line_length": 30.744680404663086, "blob_id": "8ac5ba704d0484faee42c9807d8330070ee3be36", "content_id": "417e3dca228382b6be7afd3562c51c74dd0d2451", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2984, "license_type": "no_license", "max_line_length": 76, "num_lines": 94, "path": "/incoq/compiler/tup/tupletrans.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Flattening of tuples in comprehensions.\n\nEach nested tuple on the LHS of an enumerator gets replaced.\nThe top-level is not replaced, nor are any tuples anywhere else\nin the comprehension. 
Fresh variables take their place, and new\nenumerators over tuple relations are inserted immediately after\ntheir use.\n\"\"\"\n\n\n__all__ = [\n 'flatten_tuples_comp',\n 'flatten_tuples_allcomps',\n 'flatten_tuples',\n]\n\n\nfrom incoq.util.collections import OrderedSet\nimport incoq.compiler.incast as L\n\nfrom .tuprel import make_trel, get_trel\n\n\nclass TupleFlattener(L.NodeTransformer):\n \n def __init__(self, tupvar_namer):\n super().__init__()\n self.tupvar_namer = tupvar_namer\n self.trels = OrderedSet()\n \n def process(self, tree):\n self.new_clauses = []\n tree = super().process(tree)\n return tree, self.new_clauses\n \n def visit_Enumerator(self, node):\n # If LHS is a tuple, skip over the top level.\n # Either way, don't descend into RHS.\n if isinstance(node.target, L.Tuple):\n elts = self.visit(node.target.elts)\n new_target = node.target._replace(elts=elts)\n return node._replace(target=new_target)\n else:\n new_target = self.generic_visit(node.target)\n return node._replace(target=new_target)\n \n def visit_Tuple(self, node):\n # No need to recurse, that's taken care of by the caller\n # of this visitor.\n tupvar = self.tupvar_namer.next()\n arity = len(node.elts)\n trel = make_trel(arity)\n elts = (L.sn(tupvar),) + node.elts\n new_cl = L.Enumerator(L.tuplify(elts, lval=True),\n L.ln(trel))\n self.new_clauses.append(new_cl)\n self.trels.add(trel)\n return L.sn(tupvar)\n\ndef flatten_tuples_comp(comp):\n \"\"\"Flatten away nested tuples. Return the modified comprehension\n and an OrderedSet of tuple relations used.\n \"\"\"\n tupvar_namer = L.NameGenerator(fmt='_tup{}', counter=1)\n flattener = TupleFlattener(tupvar_namer)\n comp = L.rewrite_compclauses(comp, flattener.process,\n after=True, enum_only=True, recursive=True)\n return comp, flattener.trels\n\ndef flatten_tuples_allcomps(tree):\n \"\"\"Flatten nested tuples in all comprehensions. Return the modified\n tree and an OrderedSet of all tuple relations used.\n \"\"\"\n class Flattener(L.QueryMapper):\n \n def process(self, tree):\n self.trels = OrderedSet()\n tree = super().process(tree)\n return tree, self.trels\n \n def map_Comp(self, node):\n new_comp, new_trels = flatten_tuples_comp(node)\n self.trels.update(new_trels)\n return new_comp\n \n return Flattener.run(tree)\n\n\ndef flatten_tuples(tree):\n \"\"\"Flatten all nested tuples in a program. Return the modified\n program.\n \"\"\"\n tree, _trels = flatten_tuples_allcomps(tree)\n return tree\n" }, { "alpha_fraction": 0.5552552938461304, "alphanum_fraction": 0.5568488836288452, "avg_line_length": 25.099456787109375, "blob_id": "ef791f7fca7da009f1a3778e8e08a70d9698d27a", "content_id": "62dec4d3ebb5f6cc7d8a6fe21997949b2f0410c4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14433, "license_type": "no_license", "max_line_length": 79, "num_lines": 553, "path": "/incoq/runtime/runtimelib.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "###############################################################################\n# runtimelib.py #\n# Author: Jon Brandvein #\n###############################################################################\n\n\"\"\"This module includes features needed for input programs to be\nexecutable as Python code, such as straightforward implementations of\nqueries. 
It also includes data type definitions and utilities for\ninstrumentation.\n\"\"\"\n\n\n__all__ =[\n 'OPTIONS',\n 'QUERYOPTIONS',\n 'NODEMAND',\n 'allow_profile',\n \n 'setmatch',\n 'count',\n 'len_',\n 'max2',\n 'min2',\n \n 'get_structure_sizes',\n 'get_total_structure_size',\n \n 'Type',\n 'Obj',\n 'Set',\n 'RCSet',\n 'Map',\n \n 'MSet',\n 'FSet',\n 'MAPSet',\n \n 'Tree',\n \n 'LRUSet',\n]\n\n\nfrom collections import Counter\nimport builtins\n\ntry:\n from bintrees import FastAVLTree\n HAVE_TREES = True\nexcept ImportError:\n HAVE_TREES = False\n\nfrom .lru import LRUTracker\n\n\n# ---- Helpers ----\n\ndef tupify(items):\n \"\"\"Return a tuple of the given items, or just the singular value\n if items is a singleton list.\n \"\"\"\n if len(items) == 1:\n return items[0]\n else:\n return tuple(items)\n\ndef display_helper(item):\n \"\"\"Formatting helper that does not expand containers.\"\"\"\n if not isinstance(item, Type):\n return str(item)\n else:\n return '<{} at {}>'.format(\n type(item).__name__,\n hex(id(item)))\n\n\n# ---- Instrumentation utilities ----\n\ndef get_structure_sizes(namespace):\n \"\"\"Return a dictionary mapping from structure name to size, for each\n structure in a module's global namespace. For set-like objects, the\n structure size is its length. For image maps, it's the number of\n image sets plus the sum of their sizes.\n \"\"\"\n return {name: obj.get_structure_size()\n for name, obj in namespace.items()\n if isinstance(obj, Type)}\n\ndef get_total_structure_size(namespace):\n \"\"\"Return the total structure size taken by all structures in a\n module's global namespace.\n \"\"\"\n return sum(get_structure_sizes(namespace).values())\n\n\n# ---- Directive helpers ----\n\n# Used to specify options in the transformation.\n# No effect at runtime.\n\ndef OPTIONS(*args, **kargs):\n pass\n\ndef QUERYOPTIONS(*args, **kargs):\n pass\n\ndef NODEMAND(value):\n return value\n\ndef allow_profile(f):\n \"\"\"If \"profile\" is provided in builtins, alias for that.\n Otherwise, no-op. This is useful for writing kernprof-able\n scripts that also run without profiling.\n \"\"\"\n profile = getattr(builtins, 'profile', None)\n return profile(f) if profile else f\n\n\n# ---- Batch implementations of queries ----\n\ndef _tuplematch(item, mask, key):\n \"\"\"Match a single tuple against a mask and key. If it matches,\n return a pair of True and a list of the unbound values. If it\n doesn't, return a pair of False and None.\n \"\"\" \n b_parts = []\n u_parts = []\n for part, c in zip(item, mask):\n if c == 'b':\n b_parts.append(part)\n elif c == 'u':\n u_parts.append(part)\n elif c.isdigit() and c != '0':\n n = int(c)\n if part != item[n - 1]:\n return False, None\n elif c == 'w':\n pass\n else:\n assert()\n \n if tupify(b_parts) == key:\n return True, u_parts\n else:\n return False, None\n\ndef setmatch(rel, mask, key):\n \"\"\"Tuple set (relation) pattern matching.\"\"\"\n \n # Normalize mask string.\n mask = {'out': 'bu', 'in': 'ub'}.get(mask, mask)\n \n assert all(c == 'b' or\n c == 'u' or\n (c.isdigit() and c != '0') or\n c == 'w'\n for c in mask)\n \n # Special case: If the mask is just 'w', always succeed\n # with the empty tuple. 
(Not sure if this is formally\n # the right thing to do or not.)\n if mask == 'w':\n return {()}\n \n result = Set()\n for item in rel:\n # Skip elements that are not tuples or that are tuples\n # with the wrong length.\n if not (isinstance(item, tuple) and\n len(item) == len(mask)):\n continue\n \n does_match, u_parts = _tuplematch(item, mask, key)\n \n if does_match:\n result.add(tupify(u_parts))\n \n return result\n\n# Aggregate query \"sum\" is already available as Python function.\n# Aggregate query \"count\" is just Python len().\ncount = len\n\n# Version of len that's not interpreted as a query.\nlen_ = len\n\n# Max and min aggregates that operate on zero or more scalar arguments.\n# Each argument is either an orderable value or None. The highest non-\n# None value is returned, or None if all given values are None (or if\n# no arguments were given.)\ndef max2(*args):\n res = None\n for x in args:\n if x is None:\n continue\n if res is None or x > res:\n res = x\n return res\n\ndef min2(*args):\n res = None\n for x in args:\n if x is None:\n continue\n if res is None or x < res:\n res = x\n return res\n\n\n# ---- Types ----\n\nclass Type:\n \n \"\"\"Base class for types.\"\"\"\n \n def __getstate__(self):\n raise NotImplementedError\n \n def __setstate__(self, state):\n raise NotImplementedError\n \n # Make sure our subclasses don't accidentally inherit\n # the pickling implementation of built-in types unless\n # explicitly requested.\n __reduce__ = object.__reduce__\n __reduce_ex__ = object.__reduce_ex__\n \n # Ensure identity semantics.\n __hash__ = object.__hash__\n __eq__ = object.__eq__\n \n def get_structure_size(self):\n raise NotImplementedError\n\n\nclass Obj(Type):\n \n \"\"\"Generic object type.\"\"\"\n \n # Note that the built-in \"object\" class doesn't support field\n # assignment.\n \n def __repr__(self):\n if hasattr(self, 'name'):\n return 'Obj(' + self.name + ')'\n else:\n return super().__repr__()\n \n def get_structure_size(self):\n return 1\n \n def __getstate__(self):\n return self.__dict__\n \n def __setstate__(self, state):\n self.__dict__.update(state)\n\n\nclass Set(Type, set):\n \n \"\"\"Set type.\"\"\"\n \n # This is implemented by directly inheriting from set, for speed.\n # Each wrapper call to a Python-level function introduces\n # noticeable overhead.\n \n def __repr__(self):\n return '{' + ', '.join(display_helper(item) for item in self) + '}'\n \n # Standard update operations, aliased in order to bypass\n # transformation.\n \n def _add(self, elem):\n super().add(elem)\n \n def _remove(self, elem):\n super().remove(elem)\n \n # Macro updates.\n \n # update(), intersection_update(), difference_update(),\n # symmetric_difference_update(), and clear() are all\n # defined by the built-in set class.\n \n def assign_update(self, other):\n if self is not other:\n self.clear()\n self.update(other)\n \n def get_structure_size(self):\n return len(self)\n \n def __getstate__(self):\n return set(self)\n \n def __setstate__(self, state):\n self.update(state)\n\n\nclass RCSet(Type):\n \n \"\"\"Reference-counted set type.\"\"\"\n \n # We're gonna have some constant factor overhead when using this\n # class. It relies on collections.Counter, which is implemented\n # in Python. It wraps this class rather than extending an existing\n # collection. 
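\n #\n # A rough usage sketch (our own, for illustration; 'x' stands\n # for any hashable element):\n #\n # s = RCSet()\n # s.add('x') # strict add; refcount becomes 1\n # s.incref('x') # refcount 2\n # s.decref('x') # refcount back to 1\n # s.remove('x') # strict remove; only legal at refcount 1\n #\n # 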
It has assertions, and uses built-in functions like\n # len() instead of calling special methods directly like __len__().\n # This could be mitigated by making alternate streamlined\n # definitions that are controlled with an INSTRUMENTING flag.\n #\n # (Note that an INSTRUMENTING flag would have to control how\n # a method is defined at class definition time, not the control\n # flow within a method. Otherwise checking the flag alone wastes\n # precious time.)\n \n # WISHLIST: Possible optimization: cut out indirection of __iter__()\n # by making this class subset set() and using the builtin\n # set.__iter__(). Would presumably save a little overhead for each\n # block of iterations over this set, especially useful when the set\n # is very small (so overhead is larger by comparison).\n \n def __init__(self):\n super().__init__()\n self.elems = Counter()\n \"\"\"Map from element to its reference count.\"\"\"\n \n def __repr__(self):\n return '{' + ', '.join(display_helper(item) for item in self) + '}'\n \n def __iter__(self):\n return iter(self.elems)\n \n def __len__(self):\n return len(self.elems)\n \n def __contains__(self, value):\n return value in self.elems\n \n def getref(self, value):\n \"\"\"Return the reference count of an existing element.\"\"\"\n return self.elems[value]\n \n def incref(self, value):\n \"\"\"Increment the refcount of an existing element.\"\"\"\n assert value in self.elems\n self.elems[value] += 1\n \n def decref(self, value):\n \"\"\"Decrement the refcount of an existing element where the\n refcount is greater than 1.\"\"\"\n assert self.elems[value] > 1\n self.elems[value] -= 1\n \n def add(self, value):\n \"\"\"Strictly add an element with refcount 1.\"\"\"\n assert value not in self.elems\n self.elems[value] = 1\n \n def remove(self, value):\n \"\"\"Strictly remove an element with refcount 1.\"\"\"\n assert self.elems[value] == 1\n del self.elems[value]\n \n # Note: Not all macro updates are currently provided for RCSet.\n \n def clear(self):\n self.elems.clear()\n \n def elements(self):\n return self.elems.elements()\n \n def get_structure_size(self):\n return len(self)\n \n def __getstate__(self):\n return self.elems\n \n def __setstate__(self, state):\n self.elems = state\n\n\nclass Map(Type, dict):\n \n \"\"\"Map type.\"\"\"\n \n def __repr__(self):\n return '{' + ', '.join(display_helper(k) + ': ' + str(v)\n for k, v in self.items()) + '}'\n \n # Macro updates.\n \n def mapassign_update(self, other):\n if self is not other:\n self.clear()\n self.update(other)\n \n mapclear = dict.clear\n \n _NO_DEFAULT = object()\n def singlelookup(self, key, default=_NO_DEFAULT):\n \"\"\"If this method is used, the value must be a singleton set.\n Return the singular element of the set.\n \"\"\"\n try:\n image = self[key]\n except KeyError:\n if default is not self._NO_DEFAULT:\n return default\n else:\n raise\n \n assert len(image) == 1\n return next(iter(image))\n \n def get_structure_size(self):\n total = len(self)\n for v in self.values():\n if isinstance(v, set):\n total += len(v)\n elif isinstance(v, (set, Set, RCSet)):\n total += v.get_structure_size()\n return total\n \n def __getstate__(self):\n return dict(self)\n \n def __setstate__(self, state):\n self.update(state)\n\n\nclass PairSet(Set):\n \n \"\"\"Special set for modeling object-domain relationships. 
Updates to\n these sets will also trigger corresponding updates to the\n represented object-domain values.\n \"\"\"\n\n\nclass MSet(PairSet):\n \n \"\"\"M-set for set membership.\"\"\"\n \n def add(self, pair):\n cont, item = pair\n assert not isinstance(cont, PairSet)\n cont.add(item)\n super().add(pair)\n \n def remove(self, pair):\n cont, item = pair\n cont.remove(item)\n super().remove(pair)\n\n\nclass FSet(PairSet):\n \n \"\"\"F-set for object fields.\"\"\"\n \n def __init__(self, field):\n super().__init__()\n self.field = field\n \n def add(self, pair):\n cont, item = pair\n setattr(cont, self.field, item)\n super().add(pair)\n \n def remove(self, pair):\n cont, _item = pair\n delattr(cont, self.field)\n super().remove(pair)\n\nclass MAPSet(PairSet):\n \n \"\"\"MAP-set for maps.\"\"\"\n \n def add(self, e):\n map, key, value = e\n assert not isinstance(map, PairSet)\n map[key] = value\n super().add(e)\n \n def remove(self, e):\n map, key, value = e\n del map[key]\n super().remove(e)\n\n\nif HAVE_TREES:\n class Tree(FastAVLTree):\n \n \"\"\"Tree subclass with non-strict min/max operations, that\n return just the key (not the item pair). This simplifies\n our generated maintenance code; the user-visible min/max\n operations still require a non-empty set.\n \"\"\"\n \n def __min__(self):\n if len(self) == 0:\n return None\n else:\n return super().__min__()[0]\n \n def __max__(self):\n if len(self) == 0:\n return None\n else:\n return super().__max__()[0]\n\nelse:\n def Tree(*args, **kargs):\n raise NotImplementedError(\n 'Could not import bintrees library; Incrementalized min/max '\n 'aggregates unavailable')\n\n\nclass LRUSet(Set):\n \n \"\"\"A Set augmented with cache access operations. The cache does\n not change the semantics of additions and removals; it operates\n independently and must be queried separately.\n \"\"\"\n \n def __init__(self):\n super().__init__()\n self.cache = LRUTracker()\n \n def add(self, elem):\n super().add(elem)\n self.cache.add(elem)\n \n def remove(self, elem):\n super().remove(elem)\n self.cache.remove(elem)\n \n def ping(self, elem):\n \"\"\"Ping an element already in the set, bumping it to the\n front of the LRU cache.\n \"\"\"\n self.cache.ping(elem)\n \n def peek(self):\n \"\"\"Return the element that would be removed next.\"\"\"\n return self.cache.peek()\n \n def __getstate__(self):\n return (set(self), self.cache)\n \n def __setstate__(self, state):\n contents, cache = state\n self.update(contents)\n self.cache = cache\n" }, { "alpha_fraction": 0.6851851940155029, "alphanum_fraction": 0.7037037014961243, "avg_line_length": 17, "blob_id": "6f499944baded2a2cafe12851f35c7869753a881", "content_id": "aea8ba854da22528e213927dc0e5fa84060e818e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 54, "license_type": "no_license", "max_line_length": 28, "num_lines": 3, "path": "/experiments/jql/jql_3_orig.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from .jql_3_in import *\n\ndo_query_nodemand = do_query\n" }, { "alpha_fraction": 0.46875, "alphanum_fraction": 0.5192307829856873, "avg_line_length": 15.640000343322754, "blob_id": "bc1a5359fe37de8d910afa0fbe9696aa79e52a3c", "content_id": "b4ce393ccd324b0294776536de8604b9a3fc47cd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 416, "license_type": "no_license", "max_line_length": 58, "num_lines": 25, "path": "/incoq/tests/programs/aggr/nested/compdem_in.py", "repo_name": 
"InvInc/invinc", "src_encoding": "UTF-8", "text": "# Demand-driven comprehension with an aggregate.\n\nfrom incoq.runtime import *\n\nOPTIONS(\n default_impl = 'inc',\n)\n\nS = Set()\nE = Set()\n\nfor e in [1, 2, 3, 4]:\n S.add(e)\n\nfor e in [(1, 5), (1, 8), (1, 15), (2, 9), (2, 18)]:\n E.add(e)\n\nQUERYOPTIONS(\n '{y for x2, y in E if x2 == x if y < sum(S)}',\n impl = 'dem',\n uset_mode = 'all',\n)\n\nx = 1\nprint(sorted({y for x2, y in E if x2 == x if y < sum(S)}))\n" }, { "alpha_fraction": 0.5226453542709351, "alphanum_fraction": 0.5244921445846558, "avg_line_length": 32.15160369873047, "blob_id": "7a43e1b177f9babaee5b2ea683a2813144005b2d", "content_id": "c1bd163b3133a1ec7c6f032598c85bd22e7265e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11371, "license_type": "no_license", "max_line_length": 73, "num_lines": 343, "path": "/incoq/compiler/set/mask.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Masks for pattern-based set retrieval.\"\"\"\n\n\"\"\"\nA mask is the structure underlying a tuple pattern. A mask with\nk parts can be applied to a tuple or relation of arity k. In the\nbasic case, each part is either 'b' or 'u' indicating \"bound\" or\n\"unbound\" respectively. Given a mask and a key that consists of\nvalues for each bound part, a pattern match returns the values\nfor each unbound part in the corresponding tuple(s).\n\nA wildcard part is represented by 'w'. It acts as 'u', but its\ncorresponding value is not returned as part of the match.\n\nAn equality part is an integer (represented here as a decimal\nstring). It identifies the index of a previous part (starting at 1)\nthat this part must equal. Equality parts do not have corresponding\nentries in the key.\n\nExample match:\n\n Mask: bubw12u\n Key: (a, c)\n Tuple: (a, b, c, d, a, b, z)\n Result: (b, z)\n\n\"\"\"\n\n### TODO: document keymasks\n\n\n__all__ = [\n 'Mask',\n 'AuxmapSpec',\n]\n\n\nfrom simplestruct import Struct, Field\nfrom simplestruct.type import checktype, checktype_seq\n\nimport incoq.compiler.incast as L\n\n\nclass Mask(Struct):\n \n \"\"\"Pattern mask.\"\"\"\n \n parts = Field()\n \n @classmethod\n def from_vars(cls, vars, bindenv, wildvars=None):\n \"\"\"Construct the mask underlying a variable pattern tuple\n with a given environment of bound variables.\n \n vars is a sequence of variable identifiers, or '_' for\n wildcards. bindenv is a set of variables that are considered\n bound. 
The returned mask can be used with the bound variables\n to obtain matching values for the unbound variables.\n \n If wildvars is given, it must be disjoint from bindenv.\n
Variables in wildvars will be replaced by wildcards.\n \"\"\"\n parts = []\n for i, v in enumerate(vars):\n if v == '_':\n parts.append('w')\n
elif v in vars[:i]:\n n = vars.index(v) + 1\n parts.append(str(n))\n elif v in bindenv:\n parts.append('b')\n
elif wildvars is not None and v in wildvars:\n parts.append('w')\n else:\n parts.append('u')\n \n return cls(parts)\n \n
@classmethod\n def from_proj(cls, vars):\n \"\"\"Construct a projection mask from a sequence of variables\n and wildcard symbols.\n \"\"\"\n
parts = []\n for i, v in enumerate(vars):\n if v == '_':\n parts.append('w')\n else:\n parts.append('b')\n \n
return cls(parts)\n \n @classmethod\n def from_keylen(cls, n):\n \"\"\"Construct a keymask with n many parameter parts.\"\"\"\n
return cls('b' * n + 'u')\n \n def __new__(cls, parts):\n \"\"\"Construct from a string, or a sequence of strings.\"\"\"\n # Validate parts.\n
try:\n checktype(parts, str)\n except TypeError:\n checktype_seq(parts, str)\n parts = tuple(parts)\n \n
if not all(c == 'b' or\n c == 'u' or\n (c.isdigit() and c != '0') or\n c == 'w'\n
for c in parts):\n raise ValueError('Invalid pattern mask: ' + ''.join(parts))\n \n if any(c.isdigit() and len(c) > 1\n
for c in parts):\n raise ValueError('Equality constraints with index > 9 '\n 'not supported')\n \n
if any(c.isdigit() and int(c) - 1 >= i\n for i, c in enumerate(parts)):\n raise ValueError('Equality constraint must refer to smaller '\n
'index than own occurrence')\n \n return super().__new__(cls, parts)\n \n def __init__(self, parts):\n # Set derived data.\n \n
if self.parts == ('b', 'u'):\n self.maskstr = 'out'\n elif self.parts == ('u', 'b'):\n self.maskstr = 'in'\n else:\n
self.maskstr = ''.join(self.parts)\n \"\"\"String (possibly non-formulaic) representation of mask.\n Valid for use as an identifier.\n \"\"\"\n \n
self.is_allbound = all(c == 'b' or c.isdigit()\n for c in self.parts)\n \"\"\"True if fully bound. Equality-constrained parts count as\n
bound.\n \"\"\" \n \n self.is_allunbound = all(c == 'u' or c == 'w'\n for c in self.parts)\n
\"\"\"True if fully unbound. 
Wildcards count as unbound.\"\"\"\n \n self.has_wildcards = any(c == 'w' for c in self.parts)\n \"\"\"True if there are wildcards.\"\"\"\n \n self.has_equalities = any(c.isdigit()\n for c in self.parts)\n \"\"\"True if there are any equalities.\"\"\"\n \n self.is_mixed = not(self.is_allbound or self.is_allunbound)\n \"\"\"True if neither fully bound nor unbound.\"\"\"\n \n \n p = self.parts\n n = len(p)\n self.is_keymask = (all(c == 'b' for c in p[:n-1]) and\n p[n-1] == 'u')\n \"\"\"True if has form bbb...bu.\"\"\"\n \n # Lookup mask: True if has form bbb...bu...uuu.\n if 'u' in p:\n first_u = p.index('u')\n self.is_lookupmask = (all(c == 'b' for c in p[:first_u]) and\n all(c == 'u' for c in p[first_u:]))\n self.lookup_arity = first_u\n else:\n self.is_lookupmask = False\n self.lookup_arity = None\n \n def __str__(self):\n return self.maskstr\n \n def __repr__(self):\n return type(self).__name__ + '(' + self.maskstr + ')'\n \n def __len__(self):\n return len(self.parts)\n \n def make_node(self):\n \"\"\"Return a Str node with the mask string.\"\"\"\n # Would break if we allowed equality indices > 9.\n return L.Str(''.join(self.parts))\n \n def split_vars(self, vars):\n \"\"\"Given a sequence of variables, determine which ones are\n bound and unbound according to this pattern.\n \n Return a triple of a tuple of bound vars, a tuple of unbound\n vars, and a tuple of equality pairs that are necessary and\n sufficient to satisfy the equality constraints.\n \n Variables corresponding to bound components and unbound\n components are appended to the respective lists. Variables\n corresponding to wildcard and equality parts are skipped.\n \"\"\"\n if len(vars) != len(self.parts):\n raise ValueError('Variable list of wrong length ({}) '\n 'for mask ({})'.format(\n len(vars), len(self.parts)))\n \n boundvars = []\n unboundvars = []\n eqs = []\n for v, c in zip(vars, self.parts):\n if c == 'b':\n boundvars.append(v)\n elif c == 'u':\n unboundvars.append(v)\n elif c.isdigit():\n n = int(c) - 1\n eqs.append((vars[n], v))\n elif c == 'w':\n pass\n \n return tuple(boundvars), tuple(unboundvars), tuple(eqs)\n \n def make_projkey(self, val):\n \"\"\"If this mask has no 'u' components, given a value for a tuple,\n construct a key expression out of the non-wildcard components.\n \"\"\"\n components = []\n for i, c in enumerate(self.parts):\n if c == 'b':\n # val[i]\n expr = L.Subscript(val, L.Index(L.Num(i)), L.Load())\n components.append(expr)\n elif c == 'w':\n pass\n elif c.isdigit():\n pass\n else:\n assert()\n return L.tuplify(components)\n \n def breakkey(self, node):\n \"\"\"For a keymask, break a key node into a tuple of its parts.\"\"\"\n assert self.is_keymask\n \n n = len(self.parts) - 1\n if n == 1:\n return (node,)\n else:\n assert(isinstance(node, L.Tuple))\n assert(len(node.elts) == n)\n return node.elts\n \n def make_delta_mask(self):\n \"\"\"Form a new mask for using a delta check on this key.\n Replace all 'u' with 'b', leaving other parts alone.\n \"\"\"\n new_parts = []\n for c in self.parts:\n if c == 'u':\n new_parts.append('b')\n else:\n new_parts.append(c)\n return Mask(new_parts)\n \n def make_param_proj_mask(self):\n \"\"\"Form a new mask that projects out everything that's\n not a bound (not counting equality components). 
E.g.\n turn 'bbuw' into 'uuww'.\n \"\"\"\n new_parts = []\n for c in self.parts:\n if c == 'b':\n new_parts.append('u')\n else:\n new_parts.append('w')\n return Mask(new_parts)\n \n def make_interkey_mask(self, vars, bindenv):\n \"\"\"Form a new mask that goes from one partition of given key\n values to the other partition of remaining key values. vars is\n a list of variable names of length equal to the number of bound\n components in this mask -- i.e. keys. bindenv is a set of key\n vars that are to be considered bound for the purposes of the\n new mask. The resulting mask goes form the vars that are\n in bindenv, to the vars that are not in bindenv, relative to\n the same relation as the one that this current mask indexes.\n Notably, equality and wildcards in this mask are preserved\n into the new mask.\n \n Put another way: Let m1 be a mask with key variables (bound\n positions) K. Let B be those variables of K that are also in\n the bindenv, and let m2 be formed by calling this function on\n m1 with bindenv. Then the result of matching m2 with B in a\n relation R should be the set of all values for key variables\n in K - B, such that the result of matching m1 with B and those\n values over R is non-empty. \n \"\"\"\n assert len(vars) == len([c for c in self.parts if c == 'b'])\n \n out_parts = []\n \n it = iter(vars)\n for c in self.parts:\n # A key position ('b') stays bound if it is in bindenv,\n # or else becomes an unbound.\n if c == 'b':\n v = next(it)\n if v in bindenv:\n out_parts.append('b')\n else:\n out_parts.append('u')\n # Unbounds become wildcards. Wildcards stay wildcards.\n elif c in ['u', 'w']:\n out_parts.append('w')\n # Equality constraints stay the same.\n elif c.isdigit():\n out_parts.append(c)\n else:\n assert()\n \n return Mask(out_parts)\n\n\n# Common instances.\nMask.BB = Mask('bb')\nMask.OUT = Mask('bu')\nMask.IN = Mask('ub')\nMask.UU = Mask('uu')\nMask.B1 = Mask('b1')\nMask.BW = Mask('bw')\nMask.U = Mask('u')\n\n\nclass AuxmapSpec(Struct):\n \n rel = Field(str)\n mask = Field(Mask)\n \n def __init__(self, rel, mask):\n self.lookup_name = '{}_{}'.format(self.rel, self.mask.maskstr)\n self.map_name = '_m_' + self.lookup_name\n \n def __str__(self):\n return self.lookup_name\n" }, { "alpha_fraction": 0.533923327922821, "alphanum_fraction": 0.533923327922821, "avg_line_length": 32.5945930480957, "blob_id": "aa6e6e49c42875f1a1aa410fd48c7221556c0618", "content_id": "01b13b3e22a0f6457e33087b37dd910a40f594a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3729, "license_type": "no_license", "max_line_length": 79, "num_lines": 111, "path": "/incoq/util/type.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "###############################################################################\n# type.py #\n# Author: Jon Brandvein #\n###############################################################################\n\n\"\"\"Utilities for facilitating type checking.\"\"\"\n\n# Spare me the \"It's not the Python way\" lectures. 
I've lost too much\n# time to type errors in this code base.\n\n# In the future, this might be replaceable by a library that does type\n# checking based on function annotations.\n\n# TODO: See if I can make exceptions exclude the innermost stack\n# frame(s) so it appears the errors are being raised where I want them\n# to.\n\n\n__all__ = [\n 'checktype',\n 'checktype_seq',\n 'checksubclass',\n 'checksubclass_seq',\n \n 'TypeCase',\n]\n\n\ndef strval(val):\n \"\"\"Helper for error messages.\"\"\"\n if isinstance(val, type):\n return val.__name__ + ' class'\n else:\n return type(val).__name__ + ' object'\n\ndef checktype(val, typ):\n \"\"\"Raise TypeError if val is not of type typ.\"\"\"\n if not isinstance(val, typ):\n exp = typ.__name__\n got = strval(val)\n raise TypeError('Expected {}; got {}'.format(exp, got))\n\ndef checktype_seq(seq, typ):\n \"\"\"Raise TypeError if any element of seq is not of type typ.\n \n As a special case, a string does not count as a sequence of strings\n (to catch a common error case). \n \"\"\"\n exp = typ.__name__\n \n # Make sure we have a sequence.\n try:\n iterator = iter(seq)\n # Generators aren't sequences. This avoids a confusing case\n # where we consume a generator by type-checking it, and leave\n # only an exhausted iterator for the user code.\n len(seq)\n except (TypeError, AssertionError):\n got = strval(seq)\n raise TypeError('Expected sequence of {}; '\n 'got {} instead of sequence'.format(exp, got))\n \n if typ is str:\n if isinstance(seq, str):\n raise TypeError('Expected non-string sequence of str; '\n 'got string')\n \n for item in iterator:\n if not isinstance(item, typ):\n got = strval(item)\n raise TypeError('Expected sequence of {}; '\n 'got sequence with {}'.format(exp, got))\n\ndef checksubclass(val, cls):\n \"\"\"Raise TypeError if val is not a subclass of cls.\"\"\"\n if not isinstance(val, type) or not issubclass(val, cls):\n exp = cls.__name__\n got = strval(val)\n raise TypeError('Expected subclass of {}; got {}'.format(exp, got))\n\ndef checksubclass_seq(seq, cls):\n \"\"\"Raise TypeError if any element of seq is not a subclass of cls.\"\"\"\n exp = cls.__name__\n \n try:\n iterator = iter(seq)\n except TypeError:\n got = strval(seq)\n raise TypeError('Expected sequence of subclasses of {}; '\n 'got {} instead of sequence'.format(exp, got))\n \n for item in iterator:\n if not isinstance(item, type) or not issubclass(item, cls):\n got = strval(item)\n raise TypeError('Expected sequence of subclasses of {}; '\n 'got sequence with {}'.format(exp, got))\n\n\nclass TypeCase:\n \n \"\"\"Mixin for unittest.\"\"\"\n \n def assertTypeError(self, expected, sequence=False, subclass=False):\n exp_words = ['Expected',\n 'sequence' if sequence else '',\n 'subclass' if subclass else '',\n expected.__name__,\n 'got']\n exp_msg = '.*'.join(exp_words)\n \n return self.assertRaisesRegex(TypeError, exp_msg)\n" }, { "alpha_fraction": 0.5766049027442932, "alphanum_fraction": 0.5781233906745911, "avg_line_length": 37.19599914550781, "blob_id": "d21dd302e638a85e25428c03f33d875416e80b84", "content_id": "d410e202bcdff4cf00a4d3a6a60339a9fa7bdfa7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 19098, "license_type": "no_license", "max_line_length": 75, "num_lines": 500, "path": "/incoq/compiler/cost/interact.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Features for simplifying costs based on user-supplied\nadditional information.\n\"\"\"\n\n\n__all__ = [\n 
'add_domain_names',\n 'reinterpret_cost',\n]\n\n\nfrom functools import partial\n\nfrom incoq.util.unify import unify\nimport incoq.compiler.incast as L\n
from incoq.compiler.set import Mask\nfrom incoq.compiler.comp import CompSpec\nfrom incoq.compiler.aggr import AggrSpec\n\nfrom .cost import *\n\n\n
def make_dompath_eqs(subst, roots=None):\n \"\"\"Given a domain substitution, make equations for domain path\n specifiers that begin with one of the names in roots. If roots\n
is None, do it for all keys in the substitution.\n \"\"\"\n eqs = []\n \n def make(var, rhs):\n if not (isinstance(rhs, tuple) and rhs[0] == '<T>'):\n
eq = (var, rhs)\n eqs.append(eq)\n return\n parts = rhs[1:]\n \n subvars = [var + '.' + str(i)\n
for i in range(1, len(parts) + 1)]\n eq = (var, tuple(['<T>'] + subvars))\n eqs.append(eq)\n \n
for s, p in zip(subvars, parts):\n make(s, p)\n \n for k, v in subst.items():\n if roots is None or k in roots:\n make(k, v)\n \n
return eqs\n\ndef add_domain_names(subst, domainnames, roots=None):\n \"\"\"Given a domain substitution and a mapping from domain paths to\n
names of primitive domains, produce the augmented substitution.\n \"\"\"\n subst_eqs = list(subst.items())\n dompath_eqs = make_dompath_eqs(subst, roots)\n
domainname_eqs = [(k, (v,)) for k, v in domainnames.items()]\n new_eqs = subst_eqs + dompath_eqs + domainname_eqs\n new_subst = unify(new_eqs)\n return new_subst\n\n\n
def split_resexp_vars(resexp, mask):\n \"\"\"Given a result expression of a comprehension and a mask over\n the comprehension result, determine which variables appearing in\n
the result expression are bound and unbound. Return the sets of\n bound and unbound variables, respectively.\n \n
A variable is considered bound if it occurs somewhere within any\n subexpression corresponding to a bound part of the mask, where this\n
subexpression is injective. Otherwise it is unbound.\n \n For example, if the result expression is ((x, y), z) and the mask\n
is 'bu', then x and y are bound and z is unbound. However, if the\n result expression were (x + y, z), then all of its variables would\n
be unbound, since x + y is not injective.\n \"\"\"\n find = partial(L.VarsFinder.run, ignore_functions=True)\n boundvars = set()\n unboundvars = set()\n \n
if isinstance(resexp, L.Tuple):\n boundexps, unboundexps, _ = mask.split_vars(resexp.elts)\n # Determine bound vars.\n for e in boundexps:\n
# Injective expressions include simple variable names and\n # tuple trees of variable names.\n if L.is_injective(e):\n
boundvars.update(find(e))\n # Determine unbound vars.\n for e in unboundexps:\n unboundvars.update(find(e))\n
unboundvars.difference_update(boundvars)\n \n else:\n # Special case: If the result expression is not a tuple,\n
# then the mask is either a single bound or single unbound.\n if mask == Mask('b'):\n boundvars = find(resexp)\n
elif mask == Mask.U or mask == Mask('w'):\n unboundvars = find(resexp)\n else:\n assert()\n \n
return boundvars, unboundvars\n\ndef get_nondet_info(spec, bound_vars):\n \"\"\"Given a comprehension and some bound variables, return\n
information to help put a bound on the size of the part of\n the comprehension result that matches the bound variables.\n \n
Specifically, we look at the non-determined variables. Choose\n a join order of the comprehension's clauses starting with the\n bound variables. 
For each clause in order, append to the result\n a triple of\n \n (iterated relation, mask, non-determined variables)\n \n where the non-determined variables are a subset of unbound vars\n in the clause that, together with the bound vars, functionally\n determine the remaining unbound vars in the clause. All the\n unbound variables in a clause become bound for future clauses.\n The mask is the lookup pattern that goes from the bound vars\n and determined unbound vars to the non-determined unbound vars.\n \n This goes on until all variables appearing in the comprehension\n result expression are bound.\n \n For each entry in the result, the newly introduced non-\n determined vars introduce a cost factor, and the overall\n comprehension's cost is bounded by the product of these factors.\n Each factor can itself be bounded in two ways. First, it can be\n considered recursively as an image set lookup over the iterated\n relation. Second, we can take all the membership constraints\n on the non-determined variables and use domain bounds for them.\n Assembling/minimizing these bounds is the caller's\n responsibility.\n \"\"\"\n goal_vars = L.VarsFinder.run(spec.resexp, ignore_functions=True)\n bound_vars = set(bound_vars)\n result = []\n \n ordering = spec.join.get_ordering(bound_vars)\n for _i, cl, _bindenv in ordering:\n # Skip the remaining clauses if we bound all the\n # variables we need to.\n if bound_vars.issuperset(goal_vars):\n break\n # Ignore condition clauses and clauses that are not over\n # a relation.\n if cl.enumrel is None:\n continue\n \n det_vars = set(cl.get_determined_vars(bound_vars))\n nondet_vars = set(cl.enumvars) - bound_vars - det_vars\n \n # Special case for lower cost bounds: If we happen to be able\n # to span the goal vars by taking some but not all of the nondet\n # vars and no det vars, then do that instead.\n if (bound_vars | nondet_vars).issuperset(goal_vars):\n to_vars = goal_vars - bound_vars\n from_vars = bound_vars\n else:\n to_vars = nondet_vars\n from_vars = bound_vars\n wild_vars = set(cl.enumvars) - from_vars - to_vars\n \n mask = Mask.from_vars(cl.enumlhs, from_vars, wild_vars)\n \n result.append((cl.enumrel, mask, to_vars))\n \n bound_vars.update(cl.enumvars)\n \n return result\n\n\nclass CostReinterpreter(CostTransformer):\n \n def __init__(self, invs,\n domain_subst, domain_sizes, domain_costs,\n *, strip_min=False):\n super().__init__()\n self.invs = invs\n self.domain_subst = domain_subst\n self.domain_sizes = domain_sizes\n self.domain_costs = domain_costs\n self.strip_min = strip_min\n \n def make_min(self, terms):\n \"\"\"If strip_min is True, avoid generating MinCosts by\n trying to pick the best option statically. Arbitrarily\n commit to one if necessary. If strip_min is False, just\n emit a MinCost. 
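(A hypothetical\n instance: make_min([NameCost('R'), None, UnitCost()]) first\n drops the None; with strip_min=False it returns a MinCost over\n the two remaining terms, possibly simplified, and with\n strip_min=True it typically commits to just one of them.)\n \n 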
terms may include None; these are ignored.\n If terms is empty (are all Nones), return the unit cost.\n \"\"\"\n terms = [t for t in terms if t is not None]\n if len(terms) == 0:\n return UnitCost()\n \n if self.strip_min:\n cost = MinCost(terms)\n # The cost of normalization may be big enough that\n # it defeats the purpose of having strip_min in the\n # first place, but the hope is that these mins will\n # be rather small.\n cost = normalize(cost)\n if isinstance(cost, MinCost) and len(cost.terms) > 1:\n return cost.terms[0]\n else:\n return cost\n else:\n cost = MinCost(terms)\n # Simplify for the sake of trying to keep the\n # size of terms small.\n cost = Simplifier.run(cost)\n return cost\n \n def dompath_to_size(self, dompath):\n \"\"\"Produce a cost for a dompath. First check to see if there's\n a specific cost override. If there isn't, then use a bound based\n on its domain type. If this type is itself a tuple (Cartesian\n product), recurse to get the sub-domain costs. If type\n information is unavailable for this type or one of the sub-\n types, return None.\n \"\"\"\n # Use cost override if available.\n if dompath in self.domain_costs:\n return self.domain_costs[dompath]\n \n # Return None if type information is unavilable.\n if dompath not in self.domain_subst:\n return None\n dom = self.domain_subst[dompath]\n \n if isinstance(dom, tuple) and dom[0] == '<T>':\n # Cartesian product of sub-domain types.\n arity = len(dom) - 1\n subdoms = [dompath + '.' + str(i) for i in range(1, arity + 1)]\n subcosts = tuple(self.dompath_to_size(s) for s in subdoms)\n if any(c == None for c in subcosts):\n return None\n return ProductCost(subcosts)\n \n elif isinstance(dom, tuple) and len(dom) == 1:\n # Primitive domain type.\n # Check for a size for this domain.\n if dom[0] in self.domain_sizes:\n return self.domain_sizes[dom[0]]\n else:\n # Just use the name of the domain.\n return NameCost(dom[0])\n \n elif isinstance(dom, str):\n # Domain variable. We don't know what to make of it,\n # so just use its name.\n return NameCost(dom)\n \n else:\n assert()\n \n def dompaths_for_mask(self, dompath, mask):\n \"\"\"For a given mask over a dompath, return the names of the\n sub-dompaths for the unbound components. If type information\n is not available for the given dompath or any needed sub-\n dompath, return None.\n \"\"\"\n if dompath not in self.domain_subst:\n return None\n dom = self.domain_subst[dompath]\n \n subdoms = [dompath + '.' 
+ str(i)\n for i in range(1, len(mask) + 1)]\n _bounds, unbounds, _eqs = mask.split_vars(subdoms)\n \n # For a tuple, just return the subdoms for the unbound components.\n # If this isn't a tuple, then there must be at most one unbound\n # component, and its dompath is the same (not dompath.1).\n if isinstance(dom, tuple) and dom[0] == '<T>':\n result = unbounds\n else:\n if len(unbounds) == 0:\n result = ()\n else:\n assert len(unbounds) == 1\n result = (dompath,)\n \n if all(subdom in self.domain_subst for subdom in result):\n return result\n else:\n return None\n \n def nondet_cost_factor(self, subrel, submask, nondet, memconstrs):\n \"\"\"Given an entry returned by get_nondet_info(), along with\n comprehension membership constraints, produce a cost factor\n based on two separate bits of information: the domain\n information for the new variables, and the cost of the clause\n image set lookup (recursively simplified).\n \"\"\"\n # Get domain-based bound.\n # For each variable, gather all constraints on it, turn each\n # into a cost, and take the min of these costs. Multiply these\n # terms for each variable together.\n factors = []\n for v in nondet:\n containing = memconstrs[v]\n sizes = [self.dompath_to_size(dompath)\n for dompath in containing]\n factor = self.make_min(sizes)\n factors.append(factor)\n domain_cost = ProductCost(factors)\n \n # Get lookup-based bound.\n # Create a name or image set cost for the iterated relation\n # and try to simplify recursively. \n if all(c == 'u' for c in submask.parts):\n lookup_cost = NameCost(subrel)\n elif all(c != 'u' for c in submask.parts):\n lookup_cost = UnitCost()\n else:\n lookup_cost = IndefImgsetCost(subrel, submask)\n lookup_cost = self.visit(lookup_cost)\n # If recursing failed to simplify this cost to a term that\n # the user would understand -- specifically, if it is still\n # a name or image set cost whose iterated relation is *not*\n # in the domain substitution -- then don't use this cost\n # term at all. (This can happen if the relation is TUP, for\n # instance.)\n if ((isinstance(lookup_cost, NameCost) and\n lookup_cost.name not in self.domain_subst) or\n (isinstance(lookup_cost, (IndefImgsetCost, DefImgsetCost)) and\n lookup_cost.rel not in self.domain_subst)):\n cost = domain_cost\n else:\n cost = self.make_min((domain_cost, lookup_cost))\n \n return cost\n \n def assemble_nondet_cost(self, nondet_info, memconstrs):\n \"\"\"Given the return data from get_nondet_info, assemble\n an overall cost for the comprehension based on the bound\n for each clause.\n \"\"\"\n factors = []\n for subrel, submask, nondet in nondet_info:\n factor = self.nondet_cost_factor(subrel, submask, nondet,\n memconstrs)\n factors.append(factor)\n return ProductCost(factors)\n \n def visit_NameCost(self, cost):\n rel = cost.name\n \n inv_cost = None\n if rel in self.invs:\n spec = self.invs[rel].spec\n \n if isinstance(spec, CompSpec):\n info = get_nondet_info(spec, set())\n memconstrs = spec.get_membership_constraints()\n inv_cost = self.assemble_nondet_cost(info, memconstrs)\n \n elif isinstance(spec, AggrSpec):\n # Create an equivalent cost over the underlying operand\n # and process it instead.\n #\n # The number of entries in the aggregate relation is the\n # number of possible parameter values to the underlying\n # operand. 
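(E.g., if relmask were\n # 'bu' -- one parameter plus the operand's result position --\n # the projection mask would be 'uw'; this instance is our own\n # illustration.) 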
Make a mask that projects out just the\n # parameters from this operand.\n mask = spec.relmask.make_param_proj_mask()\n
inv_cost = IndefImgsetCost(spec.rel, mask)\n inv_cost = self.visit(inv_cost)\n \n else:\n assert()\n \n # May return None.\n
dom_cost = self.dompath_to_size(cost.name)\n \n cost = self.make_min((inv_cost, dom_cost))\n return cost\n \n
def visit_IndefImgsetCost(self, cost):\n rel = cost.rel\n \n inv_cost = None\n if rel in self.invs:\n spec = self.invs[rel].spec\n \n
if isinstance(spec, CompSpec):\n boundvars, _ = split_resexp_vars(spec.resexp, cost.mask)\n info = get_nondet_info(spec, boundvars)\n
memconstrs = spec.get_membership_constraints()\n inv_cost = self.assemble_nondet_cost(info, memconstrs)\n \n
elif isinstance(spec, AggrSpec):\n # As above for NameCost, but exclude the parameters that\n # are bound. First determine what parameters are bound\n
# by the mask in this cost.\n vars = list(spec.params) + [object()] \n bounds, _unbounds, _eqs = cost.mask.split_vars(vars)\n
# Now modify the parameter projection mask to project\n # them away. \n mask = spec.relmask.make_param_proj_mask()\n
assert (len(spec.params) ==\n len([True for p in mask.parts if p == 'u']))\n params = iter(spec.params)\n
new_parts = []\n for part in mask.parts:\n if part == 'u':\n p = next(params)\n if p in bounds:\n
new_parts.append('w')\n continue\n new_parts.append(part)\n \n new_mask = Mask(new_parts)\n
inv_cost = IndefImgsetCost(spec.rel, new_mask)\n inv_cost = self.visit(inv_cost)\n \n else:\n assert()\n \n
# Get the dompath for each unbound component and multiply\n # them together. If any domain is unknown, the whole cost\n # is left alone.\n
dom_cost = None\n dompaths = self.dompaths_for_mask(rel, cost.mask)\n if dompaths is not None:\n
factors = [self.dompath_to_size(s) for s in dompaths]\n # Check for None again since there could've been a\n # deeper nested dompath missing.\n
if all(c is not None for c in factors):\n dom_cost = ProductCost(factors)\n \n cost = self.make_min((inv_cost, dom_cost))\n return cost\n \n
visit_DefImgsetCost = visit_IndefImgsetCost\n\n\ndef reinterpret_cost(cost, *, invs,\n domain_subst, domain_sizes, domain_costs,\n
domain_names):\n \"\"\"Obtain a simplified cost using substitution rules for costs\n and domains.\n \n
For convenience, the following shorthands are recognized for\n domain_sizes and domain_costs:\n \n - a value that is a string is interpreted as a name cost\n \n
- a value that is the number 1 is interpreted as the unit cost\n \n# For convenience, the following shorthands are recognized for\n# cost_rules:\n# \n
# - a key that is a string with one word is interpreted as\n# a relation name or domain\n# \n# - a key that is a string with two words (separated by\n
# whitespace) is interpreted as an imageset cost\n# \n# - a value that is a string is interpreted as a name cost\n# \n
# - a value that is the number 1 is interpreted as a unit cost\n \"\"\"\n# def keyexp(k):\n# if isinstance(k, str):\n# words = k.split()\n
# if len(words) == 1:\n# k = NameCost(words[0])\n# elif len(words) == 2:\n# k = IndefImgsetCost(words[0], Mask(words[1]))\n
# else:\n# assert()\n# return k\n \n def valexp(v):\n if isinstance(v, str):\n v = NameCost(v)\n elif v == 1:\n
v = UnitCost()\n return v\n \n # Expand shorthands.\n domain_sizes = {k: valexp(v) for k, v in domain_sizes.items()}\n
domain_costs = {k: valexp(v) for k, v in domain_costs.items()}\n \n domain_subst = add_domain_names(domain_subst, domain_names)\n \n
# Now apply domain expansions for remaining name costs.\n new_cost = 
CostReinterpreter.run(cost, invs, domain_subst,\n domain_sizes, domain_costs,\n strip_min=False)\n \n return new_cost\n" }, { "alpha_fraction": 0.7142857313156128, "alphanum_fraction": 0.7142857313156128, "avg_line_length": 27, "blob_id": "32421f9b14f7a8ca204274bcef2a9c7a8a502340", "content_id": "e8132a72724f0ce72bc2e0af77ad652e7c564ef8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 28, "license_type": "no_license", "max_line_length": 27, "num_lines": 1, "path": "/experiments/wifi/__init__.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from .run_wifi_exp import *\n" }, { "alpha_fraction": 0.5264750123023987, "alphanum_fraction": 0.5295007824897766, "avg_line_length": 28.05494499206543, "blob_id": "7e883877bca6bfe26337d95a6f7758daa1eb5ed4", "content_id": "33756e530e96e09c63b74925ab4fe6f881c4d0d0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5288, "license_type": "no_license", "max_line_length": 73, "num_lines": 182, "path": "/experiments/twitter/gendb_wrapper.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Wrap gendb to provide an easy interface for generating pairs\nwith degree restrictions.\n\"\"\"\n\n\n__all__ = [\n 'gen_pairs',\n 'gen_pairs_with_inverse',\n 'steal_edges',\n 'print_pairinfo',\n 'print_deginfo',\n]\n\n\n#QUIET = False\nQUIET = True\n\n\nimport sys\nimport os\nfrom random import shuffle, randrange, choice\n\nfrom gendb.gen import Domain, Relation, Database\n\n\ndef gen_pairs(A, B, outdegree):\n \"\"\"Return a list of pairs (a, b) where a and b are drawn\n from the domains A and B respectively. A and B must be\n sequences. The outdegree is passed to gendb as a constraint.\n \"\"\"\n nA = len(A)\n nB = len(B)\n \n # Gendb likes to call sys.exit() on error conditions,\n # and flood stdout with status updates. Let's make it\n # play nicely.\n try:\n if QUIET:\n old_stdout = sys.stdout\n sys.stdout = open(os.devnull, 'w')\n \n DA = Domain(1, nA)\n DB = Domain(1, nB)\n R = Relation('R', DA, DB)\n R.Set_Max_Constr4(1, 2, outdegree)\n DB = Database('DB')\n DB.Add_Relations(R)\n DB.Generate()\n except SystemExit as exc:\n raise ValueError('Gendb error') from exc\n finally:\n if QUIET:\n sys.stdout.close()\n sys.stdout = old_stdout\n \n result = [(A[i-1], B[j-1]) for i, j in R.rel_content]\n return result\n\n\ndef gen_pairs_with_inverse(A, B, outdegree, req_inv=None, max_tries=5):\n \"\"\"As above, but also identify elements from B that have an\n ideal indegree of (|A| * outdegree / |B|). Return the resulting\n relation, along with a list of all elements from B ordered by\n their proximity to the ideal indegree.\n \n If req_inv is an integer, at least this many elements from B must\n have the ideal indegree. 
If such a configuration can't be found\n after max_tries many attempts, ValueError is raised.\n \"\"\"\n R = None\n Blist = None\n n_perfect = None\n \n def try_it():\n nonlocal R, Blist, n_perfect\n R = gen_pairs(A, B, outdegree)\n R_indegs = {}\n for _x, y in R:\n R_indegs[y] = R_indegs.get(y, 0) + 1\n \n Blist = list(B)\n shuffle(Blist)\n Blist.sort(key=lambda b: abs(R_indegs[b] - outdegree))\n n_perfect = sum(1 for b in B if R_indegs[b] == outdegree)\n \n if req_inv is None:\n try_it()\n return R, Blist\n \n else:\n tries = 0\n best = 0\n while tries < max_tries:\n try_it()\n if n_perfect >= req_inv:\n return R, Blist\n tries += 1\n best = max(best, n_perfect)\n else:\n raise ValueError('Failed to generate enough elements with '\n 'desired in-degree (best: {}/{})'.format(\n best, req_inv))\n\n\ndef steal_edges(A, R, chosen, n):\n \"\"\"Make a modified version of R in which additional chosen values\n from the A domain are padded with additional outgoing edges, stolen\n from other values in (A - chosen). Return the modified R.\n \"\"\"\n R = set(R)\n \n R_out = {}\n for x, y in R:\n R_out.setdefault(x, set()).add(y)\n \n for a in chosen:\n an = len(R_out.setdefault(a, set()))\n if an > n:\n raise ValueError('Value already has too many outgoing edges')\n if an < n:\n # Grab more edges.\n # Shuffle the edges. Note that it's more likely that we'll\n # grab an edge from a node with above-average outdegree.\n Rlist = list(R)\n shuffle(Rlist)\n for x, y in Rlist:\n # Don't steal from other chosen values.\n if x in chosen:\n continue\n # Can't steal if we already have it.\n if y in R_out[a]:\n continue\n # Move edge from x->y to a->y.\n R.remove((x, y))\n R_out[x].remove(y)\n R.add((a, y))\n R_out[a].add(y)\n if len(R_out[a]) == n:\n break\n else:\n raise ValueError('Failed to steal enough edges')\n \n return R\n\n\ndef move_edge(R, R_set, B):\n \"\"\"Given R as a list and set, and domain B, choose an edge\n (x, y) to remove and a new one (x, z) to add in its place.\n R and R_set are modified in-place. 
Return (x, y, z).\n \"\"\"\n i = randrange(len(R))\n x, y = R[i]\n z = y\n while (x, z) in R_set:\n z = choice(B)\n R[i] = (x, z)\n R_set.remove((x, y))\n R_set.add((x, z))\n return (x, y, z)\n\n\ndef print_pairinfo(R):\n \"\"\"Print min/max out/in degree info for a pair relation.\"\"\"\n R_out = {}\n R_in = {}\n for x, y in R:\n R_out.setdefault(x, set()).add(y)\n R_in.setdefault(y, set()).add(x)\n \n print('Min/max out-degrees: {} - {}'.format(\n min(len(s) for s in R_out.values()),\n max(len(s) for s in R_out.values())))\n print('Min/max in-degrees: {} - {}'.format(\n min(len(s) for s in R_in.values()),\n max(len(s) for s in R_in.values())))\n\ndef print_deginfo(R, bs):\n \"\"\"Print in-degree info for the given elements of B.\"\"\"\n R_in = {}\n for _, y in R:\n R_in[y] = R_in.get(y, 0) + 1\n print([R_in[b] for b in bs])\n" }, { "alpha_fraction": 0.40219560265541077, "alphanum_fraction": 0.4755488932132721, "avg_line_length": 32.98305130004883, "blob_id": "e9fcde6da6798634f1d271a2a97118b3d4a45490", "content_id": "ee4c422256373338407f2a4d9b673ee20b2f2b2a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2004, "license_type": "no_license", "max_line_length": 156, "num_lines": 59, "path": "/incoq/tests/programs/comp/patternmaint_out.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from incoq.runtime import *\n# Comp1 := {x : (x, x, y, _) in P, y in S}\n_m_P_u1bw = Map()\ndef _maint__m_P_u1bw_add(_e):\n (v7_1, v7_2, v7_3, v7_4) = _e\n if ((v7_1 == v7_2)):\n if (v7_3 not in _m_P_u1bw):\n _m_P_u1bw[v7_3] = RCSet()\n if (v7_1 not in _m_P_u1bw[v7_3]):\n _m_P_u1bw[v7_3].add(v7_1)\n else:\n _m_P_u1bw[v7_3].incref(v7_1)\n\n_m_P_b1bw = Map()\ndef _maint__m_P_b1bw_add(_e):\n (v5_1, v5_2, v5_3, v5_4) = _e\n if ((v5_1 == v5_2)):\n if ((v5_1, v5_3) not in _m_P_b1bw):\n _m_P_b1bw[(v5_1, v5_3)] = RCSet()\n if (() not in _m_P_b1bw[(v5_1, v5_3)]):\n _m_P_b1bw[(v5_1, v5_3)].add(())\n else:\n _m_P_b1bw[(v5_1, v5_3)].incref(())\n\nComp1 = RCSet()\ndef _maint_Comp1_P_add(_e):\n # Iterate {(v1_x, v1_y) : (v1_x, v1_x, v1_y, _) in deltamatch(P, 'b1bw', _e, 1), v1_y in S}\n for (v1_x, v1_y) in setmatch(({_e} if ((_m_P_b1bw[(_e[0], _e[2])] if ((_e[0], _e[2]) in _m_P_b1bw) else RCSet()).getref(()) == 1) else {}), 'u1uw', ()):\n if (v1_y in S):\n if (v1_x not in Comp1):\n Comp1.add(v1_x)\n else:\n Comp1.incref(v1_x)\n\ndef _maint_Comp1_S_add(_e):\n # Iterate {(v3_x, v3_y) : (v3_x, v3_x, v3_y, _) in P, v3_y in deltamatch(S, 'b', _e, 1)}\n v3_y = _e\n for v3_x in (_m_P_u1bw[v3_y] if (v3_y in _m_P_u1bw) else RCSet()):\n if (v3_x not in Comp1):\n Comp1.add(v3_x)\n else:\n Comp1.incref(v3_x)\n\nS = Set()\nfor v in {(1, 1, 2, 3), (1, 2, 2, 4)}:\n # Begin maint _m_P_u1bw after \"P.add(v)\"\n _maint__m_P_u1bw_add(v)\n # End maint _m_P_u1bw after \"P.add(v)\"\n # Begin maint _m_P_b1bw after \"P.add(v)\"\n _maint__m_P_b1bw_add(v)\n # End maint _m_P_b1bw after \"P.add(v)\"\n # Begin maint Comp1 after \"P.add(v)\"\n _maint_Comp1_P_add(v)\n # End maint Comp1 after \"P.add(v)\"\nS.add(2)\n# Begin maint Comp1 after \"S.add(2)\"\n_maint_Comp1_S_add(2)\n# End maint Comp1 after \"S.add(2)\"\nprint(sorted(Comp1))" }, { "alpha_fraction": 0.721145749092102, "alphanum_fraction": 0.7253580689430237, "avg_line_length": 27.261905670166016, "blob_id": "54be0494a15a3e021fa06a5d0438dd7d63e9aa38", "content_id": "3c57c9cb93fd135487e88db11e06b1072738e285", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1187, 
"license_type": "no_license", "max_line_length": 72, "num_lines": 42, "path": "/README.md", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "IncOQ is a system for generating incremental and demand-driven\nimplementations of object-set queries. The current source tree\nis a development version.\n\n## Configuration\n\nIncOQ requires Python 3.4.\n\nTo clone, run\n\n git clone https://github.com/IncOQ/incoq.git\n\ncd to the project directory, and run\n\n git submodule update --init\n\nThe following directories should be on the PYTHONPATH, relative to\nthe project root:\n\n . (i.e., the project root)\n simplestruct\n iast\n frexp (optional; needed for benchmarks)\n distalgo (optional; needed for distalgo benchmarks)\n gendb (optional; not yet publicly available)\n osq (optional; not yet publicly available)\n\nThese paths can be added to a bash shell by sourcing the env.sh script.\nUse the -w flag on Windows/Cygwin.\n\nIn addition, the following 3rd-party Python libraries are used:\n\n bintrees (needed for aggregate queries, can otherwise be omitted)\n tabulate (optional)\n numpy (optional; needed for benchmarks)\n matplotlib (optional; needed for benchmarks)\n\n ## Invocation\n \n A single input file may be transformed by running\n \n python34 -m incoq <input file> <output file>\n" }, { "alpha_fraction": 0.37278884649276733, "alphanum_fraction": 0.3808806836605072, "avg_line_length": 26.25128173828125, "blob_id": "cb238ab88dcf930674fd6638fe856ee6272b66a3", "content_id": "6391bb0d12b03d0fd7c837c61a0e11f7b051b166", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5314, "license_type": "no_license", "max_line_length": 61, "num_lines": 195, "path": "/incoq/tests/invinc/central/test_rewritings.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Unit tests for rewritings.py.\"\"\"\n\n\nimport unittest\n\nimport incoq.compiler.incast as L\nfrom incoq.compiler.central.manager import CentralCase\nfrom incoq.compiler.central.rewritings import *\n\n\nclass RewriterCase(CentralCase):\n \n def test_distalgo(self):\n tree = L.parse_structast('''\n len(set((a for a in S))) > 0\n ''')\n tree = import_distalgo(tree)\n exp_tree = L.parse_structast('''\n count({a for a in S}) > 0\n ''')\n self.assertEqual(tree, exp_tree)\n \n def test_relationfinder(self):\n tree = L.p('''\n R = incoq.runtime.Set()\n S = set()\n T = 5\n \n ''')\n res = RelationFinder.run(tree)\n exp_res = ['R', 'S']\n self.assertCountEqual(res, exp_res)\n \n tree = L.p('''\n R = Set()\n S = Set()\n T = Set()\n for x in R:\n S.add({(x, y) for y in R})\n print(T)\n ''')\n res = RelationFinder.run(tree)\n exp_res = ['R', 'S']\n self.assertCountEqual(res, exp_res)\n \n def test_macroupdaterewriter(self):\n tree = L.p('''\n A.update(B)\n A.intersection_update(B)\n A.difference_update(B)\n A.symmetric_difference_update(B)\n A.assign_update(B)\n A.clear()\n A.mapassign_update(B)\n A.mapclear()\n ''')\n tree = MacroUpdateRewriter.run(tree)\n \n exp_tree = L.p('''\n for _upelem in B:\n if (_upelem not in A):\n A.add(_upelem)\n for _upelem in list(A):\n if (_upelem not in B):\n A.remove(_upelem)\n for _upelem in list(B):\n if (_upelem in A):\n A.remove(_upelem)\n for _upelem in list(B):\n if (_upelem in A):\n A.remove(_upelem)\n else:\n A.add(_upelem)\n if A is not B:\n while (len(A) > 0):\n _upelem = next(iter(A))\n A.remove(_upelem)\n for _upelem in B:\n A.add(_upelem)\n while (len(A) > 0):\n _upelem = next(iter(A))\n A.remove(_upelem)\n if (A is not B):\n 
while (len(A) > 0):\n _upkey = next(iter(A))\n A.delkey(_upkey)\n for (_upkey, _upval) in B.items():\n A.assignkey(_upkey, _upval)\n while (len(A) > 0):\n _upkey = next(iter(A))\n A.delkey(_upkey)\n ''')\n \n self.assertEqual(tree, exp_tree)\n \n def test_updaterewriter(self):\n tree = L.p('''\n R.add((x, y))\n S.remove((o.f, v))\n T.add(z)\n T.remove((3 + 5, 4))\n U.add(foo(x))\n M.assignkey(k, v.g)\n M.assignkey(o.f, v.g)\n M.delkey(o.f)\n o.f.add(x)\n ''')\n tree = UpdateRewriter.run(tree, self.manager.namegen)\n exp_tree = L.p('''\n R.add((x, y))\n v1 = (o.f, v)\n S.remove(v1)\n T.add(z)\n T.remove((3 + 5, 4))\n v2 = foo(x)\n U.add(v2)\n v3 = v.g\n M.assignkey(k, v3)\n v4 = o.f\n v5 = v.g\n M.assignkey(v4, v5)\n v6 = o.f\n M.delkey(v6)\n v7 = o.f\n v7.add(x)\n ''')\n self.assertEqual(tree, exp_tree)\n \n def test_strictrewriter(self):\n tree = L.p('''\n S.add(1)\n S.remove(2)\n o.f = v\n del o.g\n m.assignkey(k, v)\n m.delkey(j)\n ''')\n tree = StrictUpdateRewriter.run(tree)\n exp_tree = L.p('''\n if (1 not in S):\n S.add(1)\n if (2 in S):\n S.remove(2)\n if hasattr(o, 'f'):\n del o.f\n o.f = v\n if hasattr(o, 'g'):\n del o.g\n if (k in m):\n m.delkey(k)\n m.assignkey(k, v)\n if (j in m):\n m.delkey(j)\n ''')\n self.assertEqual(tree, exp_tree)\n \n def test_minmax(self):\n tree = L.p('''\n max({1} | {x for x in R} | S)\n max({1} & {1, 2})\n ''')\n tree = MinMaxRewriter.run(tree)\n exp_tree = L.p('''\n max2(max2(1), max({x for x in R}), max(S))\n max({1} & {1, 2})\n ''')\n self.assertEqual(tree, exp_tree)\n \n def test_deadcode(self):\n tree = L.p('''\n A = Set()\n B = Set()\n C = Set()\n for x in A:\n B.add(y)\n C.add(z)\n print(C)\n ''')\n tree = eliminate_deadcode(tree, obj_domain_out=True)\n \n exp_tree = L.p('''\n A = Set()\n pass\n C = Set()\n for x in A:\n pass\n C.add(z)\n print(C)\n ''')\n \n self.assertEqual(tree, exp_tree)\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.6409921646118164, "alphanum_fraction": 0.6409921646118164, "avg_line_length": 24.53333282470703, "blob_id": "f12e94974148e51ff55157f77b840899e16ed5fd", "content_id": "7e8cb42effd9c0f93c242b08ade2655beb9a6c31", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 766, "license_type": "no_license", "max_line_length": 74, "num_lines": 30, "path": "/incoq/tests/invinc/incast/test_nodes_untyped.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Unit tests for nodes.py.\"\"\"\n\n\nimport unittest\nimport ast\nimport iast\n\nfrom incoq.compiler.incast.nodes_untyped import *\n\n\nclass NodesCast(unittest.TestCase):\n \n def test_native_namespace(self):\n Pass = native_nodes['Pass']\n self.assertIs(Pass, ast.Pass)\n Comment = native_nodes['Comment']\n self.assertTrue(issubclass(Comment, ast.AST))\n \n def test_incast_namespace(self):\n _Pass = incast_nodes['Pass']\n self.assertTrue(issubclass(_Pass, iast.AST))\n # IncAST nodes are also exported in the module's global namespace.\n self.assertIs(Pass, _Pass)\n \n # TODO: Can have unit tests for nodes with special __new__()\n # methods that do coercion.\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.46700507402420044, "alphanum_fraction": 0.5525185465812683, "avg_line_length": 38.52469253540039, "blob_id": "b6a479157b0a76cda87a457d7d9b92d53aca6068", "content_id": "6f68aa0ca7fafa6991e6bcd70ae637eae063b376", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12805, "license_type": 
"no_license", "max_line_length": 139, "num_lines": 324, "path": "/incoq/tests/programs/aggr/nested/aggrdem_out.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from incoq.runtime import *\n# Comp1 := {(x, y) : x in _U_Comp1, (x, y) in E}\n# Comp1_Tx1 := {x : x in _U_Comp1}\n# Comp1_dE := {(x, y) : x in Comp1_Tx1, (x, y) in E}\n# Aggr1 := sum(DEMQUERY(Comp1, [x], setmatch(Comp1, 'bu', x)), None)\n# Comp12 := {x : x in S, _av1 in {Aggr1.smlookup('bu', x, None)}, (x < _av1)}\n# Comp12_Tx := {x : x in S}\n# Aggr1_delta := {x : x in Comp12_Tx}\n_m_Comp1_dE_out = Map()\ndef _maint__m_Comp1_dE_out_add(_e):\n (v30_1, v30_2) = _e\n if (v30_1 not in _m_Comp1_dE_out):\n _m_Comp1_dE_out[v30_1] = set()\n _m_Comp1_dE_out[v30_1].add(v30_2)\n\ndef _maint__m_Comp1_dE_out_remove(_e):\n (v31_1, v31_2) = _e\n _m_Comp1_dE_out[v31_1].remove(v31_2)\n if (len(_m_Comp1_dE_out[v31_1]) == 0):\n del _m_Comp1_dE_out[v31_1]\n\n_m_E_out = Map()\ndef _maint__m_E_out_add(_e):\n (v28_1, v28_2) = _e\n if (v28_1 not in _m_E_out):\n _m_E_out[v28_1] = set()\n _m_E_out[v28_1].add(v28_2)\n\n_m_Comp1_out = Map()\ndef _maint__m_Comp1_out_add(_e):\n (v26_1, v26_2) = _e\n if (v26_1 not in _m_Comp1_out):\n _m_Comp1_out[v26_1] = set()\n _m_Comp1_out[v26_1].add(v26_2)\n\ndef _maint__m_Comp1_out_remove(_e):\n (v27_1, v27_2) = _e\n _m_Comp1_out[v27_1].remove(v27_2)\n if (len(_m_Comp1_out[v27_1]) == 0):\n del _m_Comp1_out[v27_1]\n\n_m_Aggr1_out = Map()\ndef _maint__m_Aggr1_out_add(_e):\n (v24_1, v24_2) = _e\n if (v24_1 not in _m_Aggr1_out):\n _m_Aggr1_out[v24_1] = set()\n _m_Aggr1_out[v24_1].add(v24_2)\n\ndef _maint__m_Aggr1_out_remove(_e):\n (v25_1, v25_2) = _e\n _m_Aggr1_out[v25_1].remove(v25_2)\n if (len(_m_Aggr1_out[v25_1]) == 0):\n del _m_Aggr1_out[v25_1]\n\nAggr1_delta = RCSet()\ndef _maint_Aggr1_delta_Comp12_Tx_add(_e):\n # Iterate {v21_x : v21_x in deltamatch(Comp12_Tx, 'b', _e, 1)}\n v21_x = _e\n Aggr1_delta.add(v21_x)\n\ndef _maint_Comp12_Tx_S_add(_e):\n # Iterate {v19_x : v19_x in deltamatch(S, 'b', _e, 1)}\n v19_x = _e\n # Begin maint Aggr1_delta after \"Comp12_Tx.add(v19_x)\"\n _maint_Aggr1_delta_Comp12_Tx_add(v19_x)\n # End maint Aggr1_delta after \"Comp12_Tx.add(v19_x)\"\n\nComp12 = RCSet()\ndef _maint_Comp12_S_add(_e):\n # Iterate {(v15_x, v15__av1) : v15_x in deltamatch(S, 'b', _e, 1), v15__av1 in {Aggr1.smlookup('bu', v15_x, None)}, (v15_x < v15__av1)}\n v15_x = _e\n for v15__av1 in (_m_Aggr1_out[v15_x] if (v15_x in _m_Aggr1_out) else set()):\n if (v15_x < v15__av1):\n if (v15_x not in Comp12):\n Comp12.add(v15_x)\n else:\n Comp12.incref(v15_x)\n\ndef _maint_Comp12_Aggr1_add(_e):\n # Iterate {(v17_x, v17__av1) : v17_x in S, (v17_x, v17__av1) in deltamatch(Aggr1, 'bb', _e, 1), (v17_x < v17__av1)}\n (v17_x, v17__av1) = _e\n if (v17_x in S):\n if (v17_x < v17__av1):\n if (v17_x not in Comp12):\n Comp12.add(v17_x)\n else:\n Comp12.incref(v17_x)\n\ndef _maint_Comp12_Aggr1_remove(_e):\n # Iterate {(v18_x, v18__av1) : v18_x in S, (v18_x, v18__av1) in deltamatch(Aggr1, 'bb', _e, 1), (v18_x < v18__av1)}\n (v18_x, v18__av1) = _e\n if (v18_x in S):\n if (v18_x < v18__av1):\n if (Comp12.getref(v18_x) == 1):\n Comp12.remove(v18_x)\n else:\n Comp12.decref(v18_x)\n\ndef _maint_Aggr1_add(_e):\n (v11_v1, v11_v2) = _e\n if (v11_v1 in _U_Aggr1):\n v11_val = _m_Aggr1_out.singlelookup(v11_v1)\n v11_val = (v11_val + v11_v2)\n v11_1 = v11_v1\n v11_elem = _m_Aggr1_out.singlelookup(v11_v1)\n # Begin maint Comp12 before \"Aggr1.remove((v11_1, v11_elem))\"\n _maint_Comp12_Aggr1_remove((v11_1, v11_elem))\n # End maint Comp12 
before \"Aggr1.remove((v11_1, v11_elem))\"\n # Begin maint _m_Aggr1_out before \"Aggr1.remove((v11_1, v11_elem))\"\n _maint__m_Aggr1_out_remove((v11_1, v11_elem))\n # End maint _m_Aggr1_out before \"Aggr1.remove((v11_1, v11_elem))\"\n # Begin maint _m_Aggr1_out after \"Aggr1.add((v11_1, v11_val))\"\n _maint__m_Aggr1_out_add((v11_1, v11_val))\n # End maint _m_Aggr1_out after \"Aggr1.add((v11_1, v11_val))\"\n # Begin maint Comp12 after \"Aggr1.add((v11_1, v11_val))\"\n _maint_Comp12_Aggr1_add((v11_1, v11_val))\n # End maint Comp12 after \"Aggr1.add((v11_1, v11_val))\"\n\ndef _maint_Aggr1_remove(_e):\n (v12_v1, v12_v2) = _e\n if (v12_v1 in _U_Aggr1):\n v12_val = _m_Aggr1_out.singlelookup(v12_v1)\n v12_val = (v12_val - v12_v2)\n v12_1 = v12_v1\n v12_elem = _m_Aggr1_out.singlelookup(v12_v1)\n # Begin maint Comp12 before \"Aggr1.remove((v12_1, v12_elem))\"\n _maint_Comp12_Aggr1_remove((v12_1, v12_elem))\n # End maint Comp12 before \"Aggr1.remove((v12_1, v12_elem))\"\n # Begin maint _m_Aggr1_out before \"Aggr1.remove((v12_1, v12_elem))\"\n _maint__m_Aggr1_out_remove((v12_1, v12_elem))\n # End maint _m_Aggr1_out before \"Aggr1.remove((v12_1, v12_elem))\"\n # Begin maint _m_Aggr1_out after \"Aggr1.add((v12_1, v12_val))\"\n _maint__m_Aggr1_out_add((v12_1, v12_val))\n # End maint _m_Aggr1_out after \"Aggr1.add((v12_1, v12_val))\"\n # Begin maint Comp12 after \"Aggr1.add((v12_1, v12_val))\"\n _maint_Comp12_Aggr1_add((v12_1, v12_val))\n # End maint Comp12 after \"Aggr1.add((v12_1, v12_val))\"\n\n_U_Aggr1 = RCSet()\n_UEXT_Aggr1 = Set()\ndef demand_Aggr1(x):\n \"sum(DEMQUERY(Comp1, [x], setmatch(Comp1, 'bu', x)), None)\"\n if (x not in _U_Aggr1):\n _U_Aggr1.add(x)\n # Begin maint Aggr1 after \"_U_Aggr1.add(x)\"\n v13_val = 0\n for v13_elem in (_m_Comp1_out[x] if (x in _m_Comp1_out) else set()):\n v13_val = (v13_val + v13_elem)\n v13_1 = x\n # Begin maint _m_Aggr1_out after \"Aggr1.add((v13_1, v13_val))\"\n _maint__m_Aggr1_out_add((v13_1, v13_val))\n # End maint _m_Aggr1_out after \"Aggr1.add((v13_1, v13_val))\"\n # Begin maint Comp12 after \"Aggr1.add((v13_1, v13_val))\"\n _maint_Comp12_Aggr1_add((v13_1, v13_val))\n # End maint Comp12 after \"Aggr1.add((v13_1, v13_val))\"\n demand_Comp1(x)\n # End maint Aggr1 after \"_U_Aggr1.add(x)\"\n else:\n _U_Aggr1.incref(x)\n\ndef undemand_Aggr1(x):\n \"sum(DEMQUERY(Comp1, [x], setmatch(Comp1, 'bu', x)), None)\"\n if (_U_Aggr1.getref(x) == 1):\n # Begin maint Aggr1 before \"_U_Aggr1.remove(x)\"\n undemand_Comp1(x)\n v14_1 = x\n v14_elem = _m_Aggr1_out.singlelookup(x)\n # Begin maint Comp12 before \"Aggr1.remove((v14_1, v14_elem))\"\n _maint_Comp12_Aggr1_remove((v14_1, v14_elem))\n # End maint Comp12 before \"Aggr1.remove((v14_1, v14_elem))\"\n # Begin maint _m_Aggr1_out before \"Aggr1.remove((v14_1, v14_elem))\"\n _maint__m_Aggr1_out_remove((v14_1, v14_elem))\n # End maint _m_Aggr1_out before \"Aggr1.remove((v14_1, v14_elem))\"\n # End maint Aggr1 before \"_U_Aggr1.remove(x)\"\n _U_Aggr1.remove(x)\n else:\n _U_Aggr1.decref(x)\n\ndef query_Aggr1(x):\n \"sum(DEMQUERY(Comp1, [x], setmatch(Comp1, 'bu', x)), None)\"\n if (x not in _UEXT_Aggr1):\n _UEXT_Aggr1.add(x)\n demand_Aggr1(x)\n return True\n\nComp1_dE = RCSet()\ndef _maint_Comp1_dE_Comp1_Tx1_add(_e):\n # Iterate {(v7_x, v7_y) : v7_x in deltamatch(Comp1_Tx1, 'b', _e, 1), (v7_x, v7_y) in E}\n v7_x = _e\n for v7_y in (_m_E_out[v7_x] if (v7_x in _m_E_out) else set()):\n Comp1_dE.add((v7_x, v7_y))\n # Begin maint _m_Comp1_dE_out after \"Comp1_dE.add((v7_x, v7_y))\"\n _maint__m_Comp1_dE_out_add((v7_x, v7_y))\n # End 
maint _m_Comp1_dE_out after \"Comp1_dE.add((v7_x, v7_y))\"\n\ndef _maint_Comp1_dE_Comp1_Tx1_remove(_e):\n # Iterate {(v8_x, v8_y) : v8_x in deltamatch(Comp1_Tx1, 'b', _e, 1), (v8_x, v8_y) in E}\n v8_x = _e\n for v8_y in (_m_E_out[v8_x] if (v8_x in _m_E_out) else set()):\n # Begin maint _m_Comp1_dE_out before \"Comp1_dE.remove((v8_x, v8_y))\"\n _maint__m_Comp1_dE_out_remove((v8_x, v8_y))\n # End maint _m_Comp1_dE_out before \"Comp1_dE.remove((v8_x, v8_y))\"\n Comp1_dE.remove((v8_x, v8_y))\n\ndef _maint_Comp1_dE_E_add(_e):\n # Iterate {(v9_x, v9_y) : v9_x in Comp1_Tx1, (v9_x, v9_y) in deltamatch(E, 'bb', _e, 1)}\n (v9_x, v9_y) = _e\n if (v9_x in Comp1_Tx1):\n Comp1_dE.add((v9_x, v9_y))\n # Begin maint _m_Comp1_dE_out after \"Comp1_dE.add((v9_x, v9_y))\"\n _maint__m_Comp1_dE_out_add((v9_x, v9_y))\n # End maint _m_Comp1_dE_out after \"Comp1_dE.add((v9_x, v9_y))\"\n\nComp1_Tx1 = RCSet()\ndef _maint_Comp1_Tx1__U_Comp1_add(_e):\n # Iterate {v5_x : v5_x in deltamatch(_U_Comp1, 'b', _e, 1)}\n v5_x = _e\n Comp1_Tx1.add(v5_x)\n # Begin maint Comp1_dE after \"Comp1_Tx1.add(v5_x)\"\n _maint_Comp1_dE_Comp1_Tx1_add(v5_x)\n # End maint Comp1_dE after \"Comp1_Tx1.add(v5_x)\"\n\ndef _maint_Comp1_Tx1__U_Comp1_remove(_e):\n # Iterate {v6_x : v6_x in deltamatch(_U_Comp1, 'b', _e, 1)}\n v6_x = _e\n # Begin maint Comp1_dE before \"Comp1_Tx1.remove(v6_x)\"\n _maint_Comp1_dE_Comp1_Tx1_remove(v6_x)\n # End maint Comp1_dE before \"Comp1_Tx1.remove(v6_x)\"\n Comp1_Tx1.remove(v6_x)\n\ndef _maint_Comp1__U_Comp1_add(_e):\n # Iterate {(v1_x, v1_y) : v1_x in deltamatch(_U_Comp1, 'b', _e, 1), (v1_x, v1_y) in Comp1_dE}\n v1_x = _e\n for v1_y in (_m_Comp1_dE_out[v1_x] if (v1_x in _m_Comp1_dE_out) else set()):\n # Begin maint _m_Comp1_out after \"Comp1.add((v1_x, v1_y))\"\n _maint__m_Comp1_out_add((v1_x, v1_y))\n # End maint _m_Comp1_out after \"Comp1.add((v1_x, v1_y))\"\n # Begin maint Aggr1 after \"Comp1.add((v1_x, v1_y))\"\n _maint_Aggr1_add((v1_x, v1_y))\n # End maint Aggr1 after \"Comp1.add((v1_x, v1_y))\"\n\ndef _maint_Comp1__U_Comp1_remove(_e):\n # Iterate {(v2_x, v2_y) : v2_x in deltamatch(_U_Comp1, 'b', _e, 1), (v2_x, v2_y) in Comp1_dE}\n v2_x = _e\n for v2_y in (_m_Comp1_dE_out[v2_x] if (v2_x in _m_Comp1_dE_out) else set()):\n # Begin maint Aggr1 before \"Comp1.remove((v2_x, v2_y))\"\n _maint_Aggr1_remove((v2_x, v2_y))\n # End maint Aggr1 before \"Comp1.remove((v2_x, v2_y))\"\n # Begin maint _m_Comp1_out before \"Comp1.remove((v2_x, v2_y))\"\n _maint__m_Comp1_out_remove((v2_x, v2_y))\n # End maint _m_Comp1_out before \"Comp1.remove((v2_x, v2_y))\"\n\ndef _maint_Comp1_E_add(_e):\n # Iterate {(v3_x, v3_y) : v3_x in _U_Comp1, (v3_x, v3_y) in deltamatch(Comp1_dE, 'bb', _e, 1), (v3_x, v3_y) in Comp1_dE}\n (v3_x, v3_y) = _e\n if (v3_x in _U_Comp1):\n if ((v3_x, v3_y) in Comp1_dE):\n # Begin maint _m_Comp1_out after \"Comp1.add((v3_x, v3_y))\"\n _maint__m_Comp1_out_add((v3_x, v3_y))\n # End maint _m_Comp1_out after \"Comp1.add((v3_x, v3_y))\"\n # Begin maint Aggr1 after \"Comp1.add((v3_x, v3_y))\"\n _maint_Aggr1_add((v3_x, v3_y))\n # End maint Aggr1 after \"Comp1.add((v3_x, v3_y))\"\n\n_U_Comp1 = RCSet()\n_UEXT_Comp1 = Set()\ndef demand_Comp1(x):\n '{(x, y) : x in _U_Comp1, (x, y) in E}'\n if (x not in _U_Comp1):\n _U_Comp1.add(x)\n # Begin maint Comp1_Tx1 after \"_U_Comp1.add(x)\"\n _maint_Comp1_Tx1__U_Comp1_add(x)\n # End maint Comp1_Tx1 after \"_U_Comp1.add(x)\"\n # Begin maint Comp1 after \"_U_Comp1.add(x)\"\n _maint_Comp1__U_Comp1_add(x)\n # End maint Comp1 after \"_U_Comp1.add(x)\"\n else:\n _U_Comp1.incref(x)\n\ndef 
undemand_Comp1(x):\n '{(x, y) : x in _U_Comp1, (x, y) in E}'\n if (_U_Comp1.getref(x) == 1):\n # Begin maint Comp1 before \"_U_Comp1.remove(x)\"\n _maint_Comp1__U_Comp1_remove(x)\n # End maint Comp1 before \"_U_Comp1.remove(x)\"\n # Begin maint Comp1_Tx1 before \"_U_Comp1.remove(x)\"\n _maint_Comp1_Tx1__U_Comp1_remove(x)\n # End maint Comp1_Tx1 before \"_U_Comp1.remove(x)\"\n _U_Comp1.remove(x)\n else:\n _U_Comp1.decref(x)\n\ndef query_Comp1(x):\n '{(x, y) : x in _U_Comp1, (x, y) in E}'\n if (x not in _UEXT_Comp1):\n _UEXT_Comp1.add(x)\n demand_Comp1(x)\n return True\n\nS = Set()\nfor e in [1, 2, 4, 8]:\n S.add(e)\n # Begin maint Comp12_Tx after \"S.add(e)\"\n _maint_Comp12_Tx_S_add(e)\n # End maint Comp12_Tx after \"S.add(e)\"\n # Begin maint Comp12 after \"S.add(e)\"\n _maint_Comp12_S_add(e)\n # End maint Comp12 after \"S.add(e)\"\n # Begin maint demand_Aggr1 after \"S.add(e)\"\n for v23_x in Aggr1_delta.elements():\n demand_Aggr1(v23_x)\n Aggr1_delta.clear()\n # End maint demand_Aggr1 after \"S.add(e)\"\nfor e in [(1, 2), (1, 3), (2, 1), (3, 4), (8, 1), (8, 4)]:\n # Begin maint _m_E_out after \"E.add(e)\"\n _maint__m_E_out_add(e)\n # End maint _m_E_out after \"E.add(e)\"\n # Begin maint Comp1_dE after \"E.add(e)\"\n _maint_Comp1_dE_E_add(e)\n # End maint Comp1_dE after \"E.add(e)\"\n # Begin maint Comp1 after \"E.add(e)\"\n _maint_Comp1_E_add(e)\n # End maint Comp1 after \"E.add(e)\"\nprint(sorted(Comp12))" }, { "alpha_fraction": 0.7151639461517334, "alphanum_fraction": 0.7151639461517334, "avg_line_length": 25.14285659790039, "blob_id": "4775d674d200b12d4bf9ceb4268bc618e6358fdf", "content_id": "a8ac20e9794e70535e43bd540b7c4f2616f4bf52", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1464, "license_type": "no_license", "max_line_length": 68, "num_lines": 56, "path": "/incoq/compiler/incast/__init__.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Definition of the IncAST language nodes, and a suite of utilities\nfor manipulating and converting them.\n\nWe use \"PyAST\" to refer to ASTs representing Python programs. These\nASTs may only contain Python node types, plus the special Comment\nnode type. 
We use \"IncAST\" to refer to ASTs that may also include\nthe types defined in nodes.py.\n\"\"\"\n\n\n# Mostly re-export things from our modules, but also define parse\n# and unparse functions that do PyInc importing/exporting and\n# macro node processing.\n\n# Exports.\nfrom .nodes import *\nfrom .structconv import *\nfrom .error import *\nfrom .types import *\nfrom .typeeval import *\nfrom .helpers import *\nfrom .util import *\nfrom .nodeconv import *\nfrom .macros import *\nfrom .inline import *\nfrom .treeconv import *\n\n\ndef import_incast(tree):\n tree = OptionsRewriter.run(tree)\n return IncLangImporter.run(tree)\n\ndef export_incast(tree):\n return IncLangExporter.run(tree)\n\ndef p(*args, **kargs):\n tree = parse_structast(*args, **kargs)\n tree = IncMacroProcessor.run(tree)\n return tree\n\ndef pc(*args, mode=None, **kargs):\n return p(*args, mode='code', **kargs)\n\ndef ps(*args, mode=None, **kargs):\n return p(*args, mode='stmt', **kargs)\n\ndef pe(*args, mode=None, **kargs):\n return p(*args, mode='expr', **kargs)\n\ndef ts(tree):\n tree = IncLangExporter.run(tree)\n return unparse_structast(tree)\n\ndef ts_typed(tree):\n tree = IncLangExporter.run(tree)\n return unparse_structast_typed(tree)\n" }, { "alpha_fraction": 0.4427777826786041, "alphanum_fraction": 0.45055556297302246, "avg_line_length": 28.983333587646484, "blob_id": "143274b1317b9e14f3edeab883ba6a9dfdbe7e51", "content_id": "40f671c164a1e76cde28b3ea4345527fdc5c2a48", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1800, "license_type": "no_license", "max_line_length": 72, "num_lines": 60, "path": "/incoq/tests/invinc/tup/test_tuprel.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Unit tests for tupletrans.py.\"\"\"\n\n\nimport unittest\n\nimport incoq.compiler.incast as L\nfrom incoq.compiler.set import Mask\nfrom incoq.compiler.tup.tuprel import *\n\n\nclass TuprelCase(unittest.TestCase):\n \n def test_trel_helpers(self):\n trel = make_trel(2)\n \n self.assertTrue(is_trel(trel))\n self.assertFalse(is_trel('_M'))\n \n arity = get_trel(trel)\n self.assertEqual(arity, 2)\n \n def test_trel_bindmatch(self):\n code = trel_bindmatch('_TUP2', Mask('bbu'), ['t', 'x', 'y'],\n L.pc('pass'), typecheck=True)\n exp_code = L.pc('''\n if (isinstance(t, tuple) and (len(t) == 2)):\n if (t[1] == x):\n for y in setmatch({(t, t[0], t[1])}, 'bbu', (t, x)):\n pass\n ''')\n self.assertEqual(code, exp_code)\n \n code = trel_bindmatch('_TUP2', Mask('bbu'), ['t', 'x', 'y'],\n L.pc('pass'), typecheck=False)\n exp_code = L.pc('''\n if (t[1] == x):\n for y in setmatch({(t, t[0], t[1])}, 'bbu', (t, x)):\n pass\n ''')\n self.assertEqual(code, exp_code)\n \n code = trel_bindmatch('_TUP2', Mask('ubw'), ['t', 'x', 'y'],\n L.pc('pass'), typecheck=True)\n exp_code = L.pc('''\n for t in setmatch(_TUP2, 'ubw', x):\n pass\n ''')\n self.assertEqual(code, exp_code)\n \n def test_checkbad(self):\n tree = L.pc('''\n for t in setmatch(_TUP2, 'ubw', x):\n pass\n ''')\n with self.assertRaises(AssertionError):\n check_bad_setmatches(tree)\n\n\nif __name__ == '__main__':\n unittest.main()\n\n" }, { "alpha_fraction": 0.3687913417816162, "alphanum_fraction": 0.40199804306030273, "avg_line_length": 32.52358627319336, "blob_id": "6333d2150189fb2c0ef3a82194f82bf36dafc0e2", "content_id": "da658c6b5ca2bffb0565666fa5a82ff78bfb7203", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7107, "license_type": "no_license", 
"max_line_length": 75, "num_lines": 212, "path": "/incoq/tests/invinc/set/test_auxmap.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Unit tests for auxmap.py.\"\"\"\n\n\nimport unittest\n\nimport incoq.compiler.incast as L\nfrom incoq.compiler.set import Mask, AuxmapSpec\nfrom incoq.compiler.central import CentralCase\nfrom incoq.compiler.set.auxmap import *\nfrom incoq.compiler.set.auxmap import make_auxmap_maint_code\n\n\nclass TestAuxmap(CentralCase):\n \n def mainttest_helper(self, maskstr):\n spec = AuxmapSpec('R', Mask(maskstr))\n \n # Make the prefix '_' so it's easier to read/type.\n self.manager.namegen.next_prefix = lambda: '_'\n \n code = make_auxmap_maint_code(self.manager, spec, L.ln('e'), 'add')\n \n return code\n \n def test_auxmap_inv_maint(self):\n tree = self.mainttest_helper('bubuu')\n \n exp_tree = L.pc('''\n (_1, _2, _3, _4, _5) = e\n if ((_1, _3) not in _m_R_bubuu):\n _m_R_bubuu.assignkey((_1, _3), set())\n _m_R_bubuu[(_1, _3)].add((_2, _4, _5))\n ''')\n \n self.assertEqual(tree, exp_tree)\n \n def test_auxmap_inv_maint_fancy(self):\n tree = self.mainttest_helper('u1b3ww')\n \n exp_tree = L.pc('''\n (_1, _2, _3, _4, _5, _6) = e\n if ((_1 == _2) and (_3 == _4)):\n if (_3 not in _m_R_u1b3ww):\n _m_R_u1b3ww.assignkey(_3, RCSet())\n if (_1 not in _m_R_u1b3ww[_3]):\n _m_R_u1b3ww[_3].add(_1)\n else:\n _m_R_u1b3ww[_3].incref(_1)\n ''')\n \n self.assertEqual(tree, exp_tree)\n \n def test_auxmap_inv_maint_allbound(self):\n tree = self.mainttest_helper('bb')\n \n exp_tree = L.pc('''\n (_1, _2) = e\n if ((_1, _2) not in _m_R_bb):\n _m_R_bb.assignkey((_1, _2), set())\n _m_R_bb[(_1, _2)].add(())\n ''')\n \n self.assertEqual(tree, exp_tree)\n \n def test_transform_degenerate_allunbound(self):\n tree = self.mainttest_helper('uu')\n \n exp_tree = L.pc('''\n (_1, _2) = e\n if (() not in _m_R_uu):\n _m_R_uu.assignkey((), set())\n _m_R_uu[()].add((_1, _2))\n ''')\n \n self.assertEqual(tree, exp_tree)\n \n def test_inc_relmatch(self):\n spec = AuxmapSpec('R', Mask('bu'))\n \n tree = L.p('''\n R.add((1, 2))\n print(setmatch(R, 'bu', 1))\n ''')\n \n tree = inc_relmatch(tree, self.manager, spec)\n \n exp_tree = L.p('''\n _m_R_out = Map()\n def _maint__m_R_out_add(_e):\n (v1_1, v1_2) = _e\n if (v1_1 not in _m_R_out):\n _m_R_out.assignkey(v1_1, set())\n _m_R_out[v1_1].add(v1_2)\n \n def _maint__m_R_out_remove(_e):\n (v2_1, v2_2) = _e\n _m_R_out[v2_1].remove(v2_2)\n if _m_R_out[v2_1].isempty():\n _m_R_out.delkey(v2_1)\n \n with MAINT(_m_R_out, 'after', 'R.add((1, 2))'):\n R.add((1, 2))\n _maint__m_R_out_add((1, 2))\n print(_m_R_out.imglookup(1))\n ''')\n \n self.assertEqual(tree, exp_tree)\n \n def test_queryfinder(self):\n code = L.p('''\n print(setmatch(R, 'bu', a))\n print(setmatch(R, 'ub', a))\n print(setmatch(R, 'bu', b))\n print(setmatch({(1, 2)}, 'bu', 1))\n S.add((3, 4, 5))\n print(S.smlookup('bbu', (3, 4)))\n ''')\n auxmap_specs = RelmatchQueryFinder.run(code)\n \n exp_specs = {AuxmapSpec('R', Mask('bu')),\n AuxmapSpec('R', Mask('ub')),\n AuxmapSpec('S', Mask('bbu'))}\n \n self.assertCountEqual(auxmap_specs, exp_specs)\n \n def test_deltamatch(self):\n code = L.p('deltamatch(R, \"bbw\", e, 1)')\n code = DeltaMatchRewriter.run(code)\n exp_code = L.p('''\n ({e} if (setmatch(R, 'bbw', (e[0], e[1])).getref(()) == 1)\n else {})\n ''')\n self.assertEqual(code, exp_code)\n \n def test_transform(self):\n tree = L.p('''\n R.add((1, 2))\n R.remove((1, 2))\n print(setmatch(R, 'bu', a))\n print(setmatch(R, 'ub', a))\n print(setmatch(R, 'bu', b))\n print(setmatch({(1, 
2)}, 'bu', 1))\n S.add((3, 4, 5))\n print(S.smlookup('bbu', (3, 4)))\n ''')\n \n tree = inc_all_relmatch(tree, self.manager)\n \n exp_tree = L.p('''\n _m_S_bbu = Map()\n def _maint__m_S_bbu_add(_e):\n (v5_1, v5_2, v5_3) = _e\n if ((v5_1, v5_2) not in _m_S_bbu):\n _m_S_bbu.assignkey((v5_1, v5_2), set())\n _m_S_bbu[(v5_1, v5_2)].add(v5_3)\n \n def _maint__m_S_bbu_remove(_e):\n (v6_1, v6_2, v6_3) = _e\n _m_S_bbu[(v6_1, v6_2)].remove(v6_3)\n if _m_S_bbu[(v6_1, v6_2)].isempty():\n _m_S_bbu.delkey((v6_1, v6_2))\n \n _m_R_in = Map()\n def _maint__m_R_in_add(_e):\n (v3_1, v3_2) = _e\n if (v3_2 not in _m_R_in):\n _m_R_in.assignkey(v3_2, set())\n _m_R_in[v3_2].add(v3_1)\n \n def _maint__m_R_in_remove(_e):\n (v4_1, v4_2) = _e\n _m_R_in[v4_2].remove(v4_1)\n if _m_R_in[v4_2].isempty():\n _m_R_in.delkey(v4_2)\n \n _m_R_out = Map()\n def _maint__m_R_out_add(_e):\n (v1_1, v1_2) = _e\n if (v1_1 not in _m_R_out):\n _m_R_out.assignkey(v1_1, set())\n _m_R_out[v1_1].add(v1_2)\n \n def _maint__m_R_out_remove(_e):\n (v2_1, v2_2) = _e\n _m_R_out[v2_1].remove(v2_2)\n if _m_R_out[v2_1].isempty():\n _m_R_out.delkey(v2_1)\n with MAINT(_m_R_out, 'after', 'R.add((1, 2))'):\n with MAINT(_m_R_in, 'after', 'R.add((1, 2))'):\n R.add((1, 2))\n _maint__m_R_in_add((1, 2))\n _maint__m_R_out_add((1, 2))\n with MAINT(_m_R_out, 'before', 'R.remove((1, 2))'):\n _maint__m_R_out_remove((1, 2))\n with MAINT(_m_R_in, 'before', 'R.remove((1, 2))'):\n _maint__m_R_in_remove((1, 2))\n R.remove((1, 2))\n print(_m_R_out.imglookup(a))\n print(_m_R_in.imglookup(a))\n print(_m_R_out.imglookup(b))\n print(setmatch({(1, 2)}, 'bu', 1))\n with MAINT(_m_S_bbu, 'after', 'S.add((3, 4, 5))'):\n S.add((3, 4, 5))\n _maint__m_S_bbu_add((3, 4, 5))\n print(_m_S_bbu.singlelookup((3, 4)))\n ''')\n \n self.assertEqual(tree, exp_tree)\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.41432639956474304, "alphanum_fraction": 0.4313390254974365, "avg_line_length": 32.75, "blob_id": "58792febd13362bc0b284c3297167d6924d58d4d", "content_id": "cae0911a43e7afce54230a7410dbdf1d3271cfd8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12285, "license_type": "no_license", "max_line_length": 82, "num_lines": 364, "path": "/incoq/tests/invinc/aggr/test_aggr.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Unit tests for aggr.py.\"\"\"\n\n\nimport unittest\n\nimport incoq.compiler.incast as L\nfrom incoq.compiler.set import Mask\nfrom incoq.compiler.central import CentralCase\n\nfrom incoq.compiler.aggr.aggr import *\nfrom incoq.compiler.aggr.aggr import get_cg_class\n\n\nclass SpecCase(unittest.TestCase):\n \n def test_spec(self):\n # Aggregate of a relation.\n node = L.pe('count(R)')\n spec = AggrSpec.from_node(node)\n \n self.assertEqual(spec.aggrop, 'count')\n self.assertEqual(spec.rel, 'R')\n self.assertEqual(spec.relmask, Mask('u'))\n self.assertEqual(spec.params, ())\n self.assertEqual(spec.oper_demname, None)\n self.assertEqual(spec.oper_demparams, None)\n \n constrs = spec.get_domain_constraints('A')\n exp_constrs = []\n self.assertEqual(constrs, exp_constrs)\n \n # Aggregate of a setmatch, with demand.\n node = L.pe('count(DEMQUERY(foo, [c1], '\n 'setmatch(R, \"bub\", (c1, c2))))')\n spec = AggrSpec.from_node(node)\n \n self.assertEqual(spec.aggrop, 'count')\n self.assertEqual(spec.rel, 'R')\n self.assertEqual(spec.relmask, Mask('bub'))\n self.assertEqual(spec.params, ('c1', 'c2'))\n self.assertEqual(spec.oper_demname, 'foo')\n 
self.assertEqual(spec.oper_demparams, ('c1',))\n \n constrs = spec.get_domain_constraints('A')\n exp_constrs = [('A.1', 'R.1'),\n ('A.2', 'R.3')]\n self.assertEqual(constrs, exp_constrs)\n\nclass DemCountParamDemCase(unittest.TestCase):\n \n \"\"\"Demand-driven count query over an operand with parameters\n and demand.\n \"\"\"\n \n def setUp(self):\n self.aggr = L.pe('count(DEMQUERY(R, [p1], setmatch(R, \"bbu\", (p1, p2))))')\n self.spec = AggrSpec.from_node(self.aggr)\n self.incaggr = IncAggr(self.aggr, self.spec, 'A', 'A', None, False)\n self.cg = get_cg_class(self.spec.aggrop)(self.incaggr)\n \n def test_addu(self):\n code = self.cg.make_addu_maint('_')\n exp_code = L.pc('''\n _val = 0\n for _elem in setmatch(R, 'bbu', (p1, p2)):\n _val = (_val + 1)\n (_1, _2) = (p1, p2)\n A.add((_1, _2, _val))\n demand_R(p1)\n ''')\n self.assertEqual(code, exp_code)\n \n def test_removeu(self):\n code = self.cg.make_removeu_maint('_')\n exp_code = L.pc('''\n undemand_R(p1)\n (_1, _2) = (p1, p2)\n _elem = A.smlookup('bbu', (p1, p2))\n A.remove((_1, _2, _elem))\n ''')\n self.assertEqual(code, exp_code)\n \n def test_oper_maint_add(self):\n code = self.cg.make_oper_maint('_', 'add', L.pe('e'))\n exp_code = L.pc('''\n (_v1, _v2, _v3) = e\n if ((_v1, _v2) in _U_A):\n _val = A.smlookup('bbu', (_v1, _v2))\n _val = (_val + 1)\n (_1, _2) = (_v1, _v2)\n _elem = A.smlookup('bbu', (_v1, _v2))\n A.remove((_1, _2, _elem))\n A.add((_1, _2, _val))\n ''')\n self.assertEqual(code, exp_code)\n \n def test_retrieval_code(self):\n code = self.cg.make_retrieval_code()\n exp_code = L.pe('''\n DEMQUERY(A, [p1, p2], A.smlookup('bbu', (p1, p2)))\n ''')\n self.assertEqual(code, exp_code)\n\nclass DemMinParamNoDemCase(unittest.TestCase):\n \n \"\"\"Demand-driven min query over an operand with parameters but\n no demand.\n \"\"\"\n \n def setUp(self):\n self.aggr = L.pe('min(setmatch(R, \"bbu\", (p1, p2)))')\n self.spec = AggrSpec.from_node(self.aggr)\n self.incaggr = IncAggr(self.aggr, self.spec, 'A', 'A', None, False)\n self.cg = get_cg_class(self.spec.aggrop)(self.incaggr)\n \n def test_addu(self):\n code = self.cg.make_addu_maint('_')\n exp_code = L.pc('''\n _val = (Tree(), None)\n for _elem in setmatch(R, 'bbu', (p1, p2)):\n (_tree, _) = _val\n _tree[_elem] = None\n _val = (_tree, _tree.__min__())\n (_1, _2) = (p1, p2)\n A.add((_1, _2, _val))\n ''')\n self.assertEqual(code, exp_code)\n \n def test_removeu(self):\n code = self.cg.make_removeu_maint('_')\n exp_code = L.pc('''\n (_1, _2) = (p1, p2)\n _elem = A.smlookup('bbu', (p1, p2))\n A.remove((_1, _2, _elem))\n ''')\n self.assertEqual(code, exp_code)\n \n def test_oper_maint_remove(self):\n code = self.cg.make_oper_maint('_', 'remove', L.pe('e'))\n exp_code = L.pc('''\n (_v1, _v2, _v3) = e\n if ((_v1, _v2) in _U_A):\n _val = A.smlookup('bbu', (_v1, _v2))\n (_tree, _) = _val\n del _tree[_v3]\n _val = (_tree, _tree.__min__())\n (_1, _2) = (_v1, _v2)\n _elem = A.smlookup('bbu', (_v1, _v2))\n A.remove((_1, _2, _elem))\n A.add((_1, _2, _val))\n ''')\n self.assertEqual(code, exp_code)\n\nclass DemSumNoParamCase(unittest.TestCase):\n \n \"\"\"Demand-driven sum query over an operand with no parameters and\n no demand.\n \"\"\"\n \n def setUp(self):\n self.aggr = L.pe('sum(R)')\n self.spec = AggrSpec.from_node(self.aggr)\n self.incaggr = IncAggr(self.aggr, self.spec, 'A', 'A', None, False)\n self.cg = get_cg_class(self.spec.aggrop)(self.incaggr)\n \n def test_addu(self):\n code = self.cg.make_addu_maint('_')\n exp_code = L.pc('''\n _val = 0\n for _elem in setmatch(R, 'u', ()):\n _val 
= (_val + _elem)\n _ = ()\n A.add(_val)\n ''')\n self.assertEqual(code, exp_code)\n \n def test_removeu(self):\n code = self.cg.make_removeu_maint('_')\n exp_code = L.pc('''\n _ = ()\n _elem = A.smlookup('u', ())\n A.remove(_elem)\n ''')\n self.assertEqual(code, exp_code)\n \n def test_oper_maint_add(self):\n code = self.cg.make_oper_maint('_', 'add', L.pe('e'))\n exp_code = L.pc('''\n _v1 = e\n if (() in _U_A):\n _val = A.smlookup('u', ())\n _val = (_val + _v1)\n _ = ()\n _elem = A.smlookup('u', ())\n A.remove(_elem)\n A.add(_val)\n ''')\n self.assertEqual(code, exp_code)\n\nclass NoDemCountParamNoDemCase(unittest.TestCase):\n \n \"\"\"Count query over an operand with parameters, but no demand for\n aggregate nor operand.\n \"\"\"\n \n def setUp(self):\n self.aggr = L.pe('count(setmatch(R, \"bbu\", (p1, p2)))')\n self.spec = AggrSpec.from_node(self.aggr)\n self.incaggr = IncAggr(self.aggr, self.spec, 'A', None, None, False)\n self.cg = get_cg_class(self.spec.aggrop)(self.incaggr)\n \n def test_oper_maint_add(self):\n code = self.cg.make_oper_maint('_', 'add', L.pe('e'))\n exp_code = L.pc('''\n (_v1, _v2, _v3) = e\n _val = A.smdeflookup('bbu', (_v1, _v2), (0, 0))\n (_state, _count) = _val\n _state = (_state + 1)\n _val = (_state, (_count + 1))\n (_1, _2) = (_v1, _v2)\n if (not setmatch(A, 'bbu', (_v1, _v2)).isempty()):\n _elem = A.smlookup('bbu', (_v1, _v2))\n A.remove((_1, _2, _elem))\n A.add((_1, _2, _val))\n ''')\n self.assertEqual(code, exp_code)\n \n def test_retrieval_code(self):\n code = self.cg.make_retrieval_code()\n exp_code = L.pe('''\n A.smdeflookup('bbu', (p1, p2), (0, 0))[0]\n ''')\n self.assertEqual(code, exp_code)\n\nclass HalfDemSumParamNoDemCase(unittest.TestCase):\n \n \"\"\"Half-demand sum query over an operand with parameters but\n no demand.\n \"\"\"\n \n def setUp(self):\n self.aggr = L.pe('sum(setmatch(R, \"bbu\", (p1, p2)))')\n self.spec = AggrSpec.from_node(self.aggr)\n self.incaggr = IncAggr(self.aggr, self.spec, 'A', 'A', None, True)\n self.cg = get_cg_class(self.spec.aggrop)(self.incaggr)\n \n def test_addu(self):\n code = self.cg.make_addu_maint('_')\n exp_code = L.pc('''\n _val = A.smdeflookup('bbu', (p1, p2), None)\n if (_val is None):\n (_1, _2) = (p1, p2)\n A.add((_1, _2, (0, 0)))\n ''')\n self.assertEqual(code, exp_code)\n \n def test_removeu(self):\n code = self.cg.make_removeu_maint('_')\n exp_code = L.pc('''\n _val = A.smlookup('bbu', (p1, p2))\n if (_val[1] == 0):\n (_1, _2) = (p1, p2)\n _elem = A.smlookup('bbu', (p1, p2))\n A.remove((_1, _2, _elem))\n ''')\n self.assertEqual(code, exp_code)\n \n def test_oper_maint_remove(self):\n code = self.cg.make_oper_maint('_', 'remove', L.pe('e'))\n exp_code = L.pc('''\n (_v1, _v2, _v3) = e\n _val = A.smlookup('bbu', (_v1, _v2))\n if ((_val[1] == 1) and ((_v1, _v2) not in _U_A)):\n (_1, _2) = (_v1, _v2)\n _elem = A.smlookup('bbu', (_v1, _v2))\n A.remove((_1, _2, _elem))\n else:\n (_state, _count) = _val\n _state = (_state - _v3)\n _val = (_state, (_count - 1))\n (_1, _2) = (_v1, _v2)\n _elem = A.smlookup('bbu', (_v1, _v2))\n A.remove((_1, _2, _elem))\n A.add((_1, _2, _val))\n ''')\n self.assertEqual(code, exp_code)\n\nclass TransformCase(CentralCase):\n \n def test_transform_noparams_nodem(self):\n aggr_node = L.pe('sum(R, {})')\n tree = L.p('''\n R.add(5)\n print(AGGR)\n ''', subst={'AGGR': aggr_node})\n tree = inc_aggr(tree, self.manager, aggr_node, 'A',\n demand=True, half_demand=False)\n \n exp_tree = L.p('''\n A = Set()\n def _maint_A_add(_e):\n v1_v1 = _e\n if (() in _U_A):\n v1_val = A.smlookup('u', 
())\n v1_val = (v1_val + v1_v1)\n _ = ()\n v1_elem = A.smlookup('u', ())\n A.remove(v1_elem)\n A.add(v1_val)\n \n def _maint_A_remove(_e):\n v2_v1 = _e\n if (() in _U_A):\n v2_val = A.smlookup('u', ())\n v2_val = (v2_val - v2_v1)\n _ = ()\n v2_elem = A.smlookup('u', ())\n A.remove(v2_elem)\n A.add(v2_val)\n \n _U_A = RCSet()\n _UEXT_A = Set()\n def demand_A():\n 'sum(R, None)'\n if (() not in _U_A):\n with MAINT(A, 'after', '_U_A.add(())'):\n _U_A.add(())\n v3_val = 0\n for v3_elem in setmatch(R, 'u', ()):\n v3_val = (v3_val + v3_elem)\n _ = ()\n A.add(v3_val)\n else:\n _U_A.incref(())\n \n def undemand_A():\n 'sum(R, None)'\n if (_U_A.getref(()) == 1):\n with MAINT(A, 'before', '_U_A.remove(())'):\n _ = ()\n v4_elem = A.smlookup('u', ())\n A.remove(v4_elem)\n _U_A.remove(())\n else:\n _U_A.decref(())\n \n def query_A():\n 'sum(R, None)'\n if (() not in _UEXT_A):\n _UEXT_A.add(())\n demand_A()\n return True\n \n with MAINT(A, 'after', 'R.add(5)'):\n R.add(5)\n _maint_A_add(5)\n print(DEMQUERY(A, [], A.smlookup('u', (),)))\n ''')\n \n self.assertEqual(tree, exp_tree)\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.7795918583869934, "alphanum_fraction": 0.7877551317214966, "avg_line_length": 60.25, "blob_id": "ab5f5e8215d4f9ddf811b500680329b4981e412a", "content_id": "a3a40ca14e19420c5338f8d458107c3bb82b4d04", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 245, "license_type": "no_license", "max_line_length": 78, "num_lines": 4, "path": "/experiments/jql/java/jqlexp/readme.txt", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "This is an implementation of the queries from the WPN08 paper, integrated into\nthe frexp experimentation framework. Original JQL example files are by\nDarren Willis, David James Pearce, and James Noble. Query driver programs and\nthe frexp interface layer are by Jon Brandvein.\n" }, { "alpha_fraction": 0.5740740895271301, "alphanum_fraction": 0.5740740895271301, "avg_line_length": 29.85714340209961, "blob_id": "f8bd5d1f34a52c94bb85be5a62c27ce9fae43e30", "content_id": "768bd73ff1ed2902cc97043ce973b3ec47a292d4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3024, "license_type": "no_license", "max_line_length": 71, "num_lines": 98, "path": "/incoq/compiler/aggr/aggrcomp.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Aggregates inside comprehensions.\"\"\"\n\n\n__all__ = [\n 'flatten_smlookups',\n]\n\n\nfrom collections import OrderedDict\nimport incoq.compiler.incast as L\n\n\nclass LookupReplacer(L.NodeTransformer):\n \n \"\"\"Replaces each unique SMLookup expression, or SMLookup expression\n wrapped in a DemQuery node, with a fresh variable. Returns the new\n tree and a list of clauses defining the new variables.\n \n Does not traverse the RHS of an enumerator.\n \"\"\"\n \n def __init__(self, namer):\n super().__init__()\n self.namer = namer\n \"\"\"Generator for fresh names.\"\"\"\n self.repls = OrderedDict()\n \"\"\"Mapping from nodes to replacement variables.\"\"\"\n \n def process(self, tree):\n # Mark nodes that are wrapped by DemQuery nodes so we know not\n # to treat them separately.\n self.demwrapped_nodes = set()\n self.new_clauses = []\n tree = super().process(tree)\n return tree, self.new_clauses\n \n def visit_Enumerator(self, node):\n # Don't touch the RHS. 
We don't want to clobber an SMLookup\n # in a clause added by a previous run of flatten_smlookups.\n target = self.visit(node.target)\n node._replace(target=target)\n \n def visit_SMLookup(self, node):\n node = self.generic_visit(node)\n \n if node in self.demwrapped_nodes:\n return node\n \n sm = node\n assert sm.default is None\n \n v = self.repls.get(node, None)\n if v is not None:\n var = v\n else:\n self.repls[node] = var = next(self.namer)\n cl_target = L.sn(var)\n cl_iter = L.Set((sm,))\n new_cl = L.Enumerator(cl_target, cl_iter)\n self.new_clauses.append(new_cl)\n \n return L.ln(var)\n \n def visit_DemQuery(self, node):\n self.demwrapped_nodes.add(node.value)\n node = self.generic_visit(node)\n \n if not isinstance(node.value, L.SMLookup):\n return node\n sm = node.value\n assert sm.default is None\n \n v = self.repls.get(node, None)\n if v is not None:\n # Reuse existing entry.\n var = v\n else:\n # Create new entry.\n self.repls[node] = var = next(self.namer)\n # Create accompanying clause. Has form\n # var in DEMQUERY(..., {smlookup})\n # The clause constructor logic will later rewrite that,\n # or else fail if there's a syntax problem.\n cl_target = L.sn(var)\n cl_iter = node._replace(value=L.Set((sm,)))\n new_cl = L.Enumerator(cl_target, cl_iter)\n self.new_clauses.append(new_cl)\n \n return L.ln(var)\n\n\ndef flatten_smlookups(comp):\n \"\"\"Given a comprehension, flatten any demand-driven setmap lookups\n (e.g. for aggregates).\n \"\"\"\n namer = L.NameGenerator('_av{}')\n replacer = LookupReplacer(namer)\n return L.rewrite_compclauses(comp, replacer.process)\n" }, { "alpha_fraction": 0.4466608762741089, "alphanum_fraction": 0.5177797079086304, "avg_line_length": 33.969696044921875, "blob_id": "798b136dec8a043c201c5f3830cf2a367196d060", "content_id": "8e2dc8c4fc047386a73473930422bd3bc732f456", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1153, "license_type": "no_license", "max_line_length": 60, "num_lines": 33, "path": "/incoq/tests/programs/auxmap/wildcard_out.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from incoq.runtime import *\n_m_P_uwb = Map()\ndef _maint__m_P_uwb_add(_e):\n (v1_1, v1_2, v1_3) = _e\n if (v1_3 not in _m_P_uwb):\n _m_P_uwb[v1_3] = RCSet()\n if (v1_1 not in _m_P_uwb[v1_3]):\n _m_P_uwb[v1_3].add(v1_1)\n else:\n _m_P_uwb[v1_3].incref(v1_1)\n\ndef _maint__m_P_uwb_remove(_e):\n (v2_1, v2_2, v2_3) = _e\n if (_m_P_uwb[v2_3].getref(v2_1) == 1):\n _m_P_uwb[v2_3].remove(v2_1)\n else:\n _m_P_uwb[v2_3].decref(v2_1)\n if (len(_m_P_uwb[v2_3]) == 0):\n del _m_P_uwb[v2_3]\n\nfor v in [(1, 1, 2), (1, 2, 2), (3, 4, 2), (5, 6, 7)]:\n # Begin maint _m_P_uwb after \"P.add(v)\"\n _maint__m_P_uwb_add(v)\n # End maint _m_P_uwb after \"P.add(v)\"\nprint(sorted((_m_P_uwb[2] if (2 in _m_P_uwb) else RCSet())))\n# Begin maint _m_P_uwb before \"P.remove((1, 1, 2))\"\n_maint__m_P_uwb_remove((1, 1, 2))\n# End maint _m_P_uwb before \"P.remove((1, 1, 2))\"\nprint(sorted((_m_P_uwb[2] if (2 in _m_P_uwb) else RCSet())))\n# Begin maint _m_P_uwb before \"P.remove((1, 2, 2))\"\n_maint__m_P_uwb_remove((1, 2, 2))\n# End maint _m_P_uwb before \"P.remove((1, 2, 2))\"\nprint(sorted((_m_P_uwb[2] if (2 in _m_P_uwb) else RCSet())))" }, { "alpha_fraction": 0.5402930378913879, "alphanum_fraction": 0.5769230723381042, "avg_line_length": 21.75, "blob_id": "da8feb79ab3c3e6a92ece42a784937cd686a7313", "content_id": "bedbb40cdb00edf9ed2b0c380543d6dea726ab46", "detected_licenses": [], "is_generated": false, "is_vendor": 
false, "language": "Python", "length_bytes": 546, "license_type": "no_license", "max_line_length": 95, "num_lines": 24, "path": "/incoq/tests/programs/comp/sjsub_in.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "# Self-joins using subtractive clauses.\n\nfrom incoq.runtime import *\n\nOPTIONS(\n selfjoin_strat = 'sub',\n)\n\nQUERYOPTIONS(\n '{z for (x, x2) in E for (x3, y) in E for (y2, z) in S if x == x2 if x2 == x3 if y == y2}',\n impl = 'inc',\n)\n\nE = Set()\nS = Set()\n\n# If the reference counts get bungled due to double-counting on E.add,\n# we should see an erroneous non-empty result.\nS.add((1, 2))\nE.add((1, 1))\nS.remove((1, 2))\n\nprint(sorted({z for (x, x2) in E for (x3, y) in E for (y2, z) in S\n if x == x2 if x2 == x3 if y == y2}))\n" }, { "alpha_fraction": 0.4568205177783966, "alphanum_fraction": 0.4666171371936798, "avg_line_length": 25.956195831298828, "blob_id": "c2c73086c0124a6a97f635eb9dd6f0e1561e1869", "content_id": "2e2a7dc57d7ba84f7c31b5cd9dec01b1e4ef86b7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 21538, "license_type": "no_license", "max_line_length": 77, "num_lines": 799, "path": "/experiments/distalgo/run_distalgo_exp.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Run distalgo experiments.\"\"\"\n\n\nimport pickle\nimport os\n\nfrom frexp import (ExpWorkflow, Datagen,\n Extractor)\n\nfrom .distalgo_bridge import get_config, launch\n\nfrom experiments.util import SmallExtractor, LargeExtractor\n\n\nclass DistalgoDatagen(Datagen):\n \n \"\"\"Stub datagen. Args for P depends on distalgo program.\"\"\"\n \n def generate(self, P):\n return dict(\n dsparams = P,\n )\n\nclass DistalgoDriver:\n \n argnames = None\n \n def __init__(self, pipe_filename):\n with open(pipe_filename, 'rb') as pf:\n dataset, prog, other_tparams = pickle.load(pf)\n os.remove(pipe_filename)\n \n self.prog = prog\n self.module = None\n \n dafile = other_tparams.get('dafile', self.dafilename)\n \n P = dataset['dsparams']\n args = [str(P[key]) for key in self.argnames]\n \n config = get_config()\n res = launch(config, dafile,\n prog, args)\n \n self.results = {}\n self.results['time_cpu'] = res['Total process time']\n self.results['time_wall'] = res['Wall time']\n self.results['stdmetric'] = self.results['time_cpu']\n \n with open(pipe_filename, 'wb') as pf:\n pickle.dump(self.results, pf)\n\nclass DistalgoWorkflow(ExpWorkflow):\n \n require_ac = False\n \n class ExpDatagen(DistalgoDatagen):\n \n use_progs_ex = False\n \"\"\"If True, the implementations to run are specified using\n progs_ex instead of progs. 
progs_ex is a list of pairs of\n a dafile name and an inc interface file name.\n \"\"\"\n \n def get_tparams_list(self, dsparams_list):\n if not self.use_progs_ex:\n return super().get_tparams_list(dsparams_list)\n \n return [\n dict(\n tid = dsp['dsid'],\n dsid = dsp['dsid'],\n prog = prog,\n dafile = dafile,\n )\n for dafile, prog in self.progs_ex\n for dsp in dsparams_list\n ]\n \n stddev_window = .1\n min_repeats = 1\n max_repeats = 1\n \n class ExpExtractor(SmallExtractor, Extractor):\n \n name = None\n noninline = False\n \n show_cpu = True\n show_wall = False\n \n # Doesn't work since we have multiple metrics to output.\n generate_csv = False\n \n series_template = [\n (('in', 'time_cpu'), 'original (total cpu time)',\n 'red', '- s normal'),\n (('in', 'time_wall'), 'original (wall time)',\n 'red', '1-2 _s normal'),\n (('inc', 'time_cpu'), 'incremental (total cpu time)',\n 'blue', '- ^ normal'),\n (('inc', 'time_wall'), 'incremental (wall time)',\n 'blue', '1-2 _^ normal'),\n (('inc_lru', 'time_cpu'), 'incremental (total cpu time)',\n 'blue', '- ^ normal'),\n (('inc_lru', 'time_wall'), 'incremental (wall time)',\n 'blue', '1-2 _^ normal'),\n (('dem', 'time_cpu'), 'filtered (total cpu time)',\n 'green', '- ^ normal'),\n (('dem', 'time_wall'), 'filtered (wall time)',\n 'green', '1-2 _^ normal'),\n \n (('dem_subdem', 'time_cpu'), 'filtered, subdem (total cpu time)',\n '#004400', '- ^ normal'),\n (('dem_subdem', 'time_wall'), 'filtered, subdem (wall time)',\n '#004400', '1-2 _^ normal'),\n \n (('opt in', 'time_cpu'), 'opt. original (total cpu time)',\n '#FFAAAA', '- s normal'),\n (('opt in', 'time_wall'), 'opt. original (wall time)',\n '#FFAAAA', '1-2 _s normal'),\n (('opt dem', 'time_cpu'), 'opt. filtered (total cpu time)',\n 'lightgreen', '- ^ normal'),\n (('opt dem', 'time_wall'), 'opt. filtered (wall time)',\n 'lightgreen', '1-2 _^ normal'),\n ]\n \n @property\n def series(self):\n series = list(self.series_template)\n for i, s in enumerate(series):\n (prog, metric), label, color, format = s\n if ((metric == 'time_cpu' and not self.show_cpu) or\n (metric == 'time_wall' and not self.show_wall)):\n continue\n \n if prog == 'dem':\n new_prog = '{}_inc_dem{}'.format(\n self.name,\n '_noninline' if self.noninline else '')\n elif prog.startswith('opt '):\n new_prog = self.name + '_opt_inc_' + prog[4:]\n else:\n new_prog = self.name + '_inc_' + prog\n series[i] = (new_prog, metric), label, color, format\n return series\n \n # Hack it so we can project based on different metrics for\n # different sids. 
The proper refactoring would be to pass\n # sid to project_y() and the other functions that call it.\n \n def get_series_data(self, datapoints, sid):\n prog, metric = sid\n data = [p for p in datapoints if p['prog'] == prog]\n # Hack on a metric flag.\n for p in data:\n p['metric'] = metric\n return data\n \n def project_y(self, p):\n return p['results'][p['metric']]\n\n\nclass CLPaxosDriver(DistalgoDriver):\n dafilename = 'clpaxos/clpaxos.da'\n argnames = ['n_prop', 'n_acc', 'n_rounds', 'timeout']\n\nclass CLPaxos(DistalgoWorkflow):\n \n prefix = 'results/clpaxos'\n \n ExpDriver = CLPaxosDriver\n \n class ExpDatagen(DistalgoWorkflow.ExpDatagen):\n \n progs = [\n 'clpaxos_inc_in',\n 'clpaxos_inc_inc',\n# 'clpaxos_inc_dem',\n ]\n \n def get_dsparams_list(self):\n return [\n dict(\n dsid = str(x),\n x = x,\n \n n_prop = x * 3,\n n_acc = x * 1,\n n_rounds = 1,\n timeout = 3,\n )\n for x in range(1, 6 + 1, 1)\n ]\n \n class ExpExtractor(DistalgoWorkflow.ExpExtractor):\n \n name = 'clpaxos'\n noninline = True\n show_wall = True\n \n ylabel = 'Running time (in seconds)'\n xlabel = 'Number of processes'\n\n\nclass CRLeaderDriver(DistalgoDriver):\n dafilename = 'crleader/crleader.da'\n argnames = ['n_procs']\n\nclass CRLeader(DistalgoWorkflow):\n \n prefix = 'results/crleader'\n \n ExpDriver = CRLeaderDriver\n \n class ExpDatagen(DistalgoWorkflow.ExpDatagen):\n \n progs = [\n 'crleader_inc_in',\n 'crleader_inc_dem',\n ]\n \n def get_dsparams_list(self):\n return [\n dict(\n dsid = str(x),\n x = x,\n \n n_procs = x,\n )\n for x in range(10, 80 + 1, 10)\n ]\n \n# min_repeats = 5\n# max_repeats = 5\n \n class ExpExtractor(DistalgoWorkflow.ExpExtractor):\n \n name = 'crleader'\n \n show_wall = True\n \n ylabel = 'Running time (in seconds)'\n xlabel = 'Number of processes'\n\n\nclass DSCrashDriver(DistalgoDriver):\n dafilename = 'dscrash/dscrash.da'\n argnames = ['n_procs', 'maxfail']\n\nclass DSCrash(DistalgoWorkflow):\n \n prefix = 'results/dscrash'\n \n ExpDriver = DSCrashDriver\n \n class ExpDatagen(DistalgoWorkflow.ExpDatagen):\n \n progs = [\n 'dscrash_inc_in',\n 'dscrash_inc_dem',\n ]\n \n def get_dsparams_list(self):\n return [\n dict(\n dsid = str(x),\n x = x,\n \n n_procs = x,\n maxfail = 2,#int(0.25 * x),\n )\n for x in range(5, 100 + 1, 5)\n ]\n \n class ExpExtractor(DistalgoWorkflow.ExpExtractor):\n \n name = 'dscrash'\n \n ylabel = 'Running time (in seconds)'\n xlabel = 'Number of processes'\n\n\nclass HSLeaderDriver(DistalgoDriver):\n dafilename = 'hsleader/hsleader.da'\n argnames = ['n_procs']\n\nclass HSLeader(DistalgoWorkflow):\n \n prefix = 'results/hsleader'\n \n ExpDriver = HSLeaderDriver\n \n class ExpDatagen(DistalgoWorkflow.ExpDatagen):\n \n progs = [\n 'hsleader_inc_in',\n 'hsleader_inc_inc',\n# 'hsleader_inc_dem',\n ]\n \n def get_dsparams_list(self):\n return [\n dict(\n dsid = str(x),\n x = x,\n \n n_procs = x,\n )\n for x in range(10, 100 + 1, 10)\n ]\n \n# min_repeats = 5\n# max_repeats = 5\n \n class ExpExtractor(DistalgoWorkflow.ExpExtractor):\n \n name = 'hsleader'\n \n ylabel = 'Running time (in seconds)'\n xlabel = 'Number of processes'\n\n\nclass LAMutexDriver(DistalgoDriver):\n dafilename = 'lamutex/lamutex.da'\n argnames = ['n_procs', 'n_rounds']\n\nclass LAMutexSpecWorkflow(DistalgoWorkflow):\n \n ExpDriver = LAMutexDriver\n \n class ExpDatagen(DistalgoWorkflow.ExpDatagen):\n \n use_progs_ex = True\n progs_ex = [\n ('lamutex/lamutex.da', 'lamutex_inc_in'),\n ('lamutex/lamutex.da', 'lamutex_inc_inc'),\n ]\n \n class ExpExtractor(DistalgoWorkflow.ExpExtractor):\n 
name = 'lamutex'\n show_wall = True\n \n min_repeats = 5\n max_repeats = 5\n\nclass LAMutexSpecOptWorkflow(DistalgoWorkflow):\n \n ExpDriver = LAMutexDriver\n \n class ExpDatagen(DistalgoWorkflow.ExpDatagen):\n \n use_progs_ex = True\n progs_ex = [\n# ('lamutex/lamutex_opt1.da', 'lamutex_opt1_inc_in'),\n# ('lamutex/lamutex_opt1.da', 'lamutex_opt1_inc_inc'),\n \n ('lamutex/lamutex_opt2.da', 'lamutex_opt2_inc_in'),\n ('lamutex/lamutex_opt2.da', 'lamutex_opt2_inc_inc'),\n ]\n \n class ExpExtractor(DistalgoWorkflow.ExpExtractor):\n name = 'lamutex_opt2'\n show_wall = True\n \n min_repeats = 5\n max_repeats = 5\n\nclass LAMutexOrigWorkflow(DistalgoWorkflow):\n \n ExpDriver = LAMutexDriver\n \n class ExpDatagen(DistalgoWorkflow.ExpDatagen):\n \n use_progs_ex = True\n progs_ex = [\n ('lamutex/lamutex_orig.da', 'lamutex_orig_inc_in'),\n ('lamutex/lamutex_orig.da', 'lamutex_orig_inc_inc'),\n ]\n \n class ExpExtractor(DistalgoWorkflow.ExpExtractor):\n name = 'lamutex_orig'\n show_wall = True\n \n min_repeats = 5\n max_repeats = 5\n\nclass LAMutexSpecProcs(LAMutexSpecWorkflow):\n \n prefix = 'results/lamutexspec_procs'\n \n class ExpDatagen(LAMutexSpecWorkflow.ExpDatagen):\n \n def get_dsparams_list(self):\n return [\n dict(\n dsid = str(x),\n x = x,\n \n n_procs = x,\n n_rounds = 10,\n )\n for x in range(3, 30 + 1, 3)\n ]\n \n class ExpExtractor(LAMutexSpecWorkflow.ExpExtractor):\n ylabel = 'Running time (in seconds)'\n xlabel = 'Number of processes' \n xmin = 1\n xmax = 31\n\nclass LAMutexSpecRounds(LAMutexSpecWorkflow):\n \n prefix = 'results/lamutexspec_rounds'\n \n class ExpDatagen(LAMutexSpecWorkflow.ExpDatagen):\n def get_dsparams_list(self):\n return [\n dict(\n dsid = str(x),\n x = x,\n \n n_procs = 10,\n n_rounds = x,\n )\n for x in range(3, 30 + 1, 3)\n ]\n \n class ExpExtractor(LAMutexSpecWorkflow.ExpExtractor):\n ylabel = 'Running time (in seconds)'\n xlabel = 'Number of rounds'\n xmin = 1\n xmax = 31\n\nclass LAMutexSpecOptProcs(LAMutexSpecOptWorkflow):\n \n prefix = 'results/lamutexspecopt_procs'\n \n class ExpDatagen(LAMutexSpecOptWorkflow.ExpDatagen):\n \n def get_dsparams_list(self):\n return [\n dict(\n dsid = str(x),\n x = x,\n \n n_procs = x,\n n_rounds = 10,\n )\n for x in range(3, 30 + 1, 3)\n ]\n \n class ExpExtractor(LAMutexSpecOptWorkflow.ExpExtractor):\n ylabel = 'Running time (in seconds)'\n xlabel = 'Number of processes'\n xmin = 1\n xmax = 31\n\nclass LAMutexSpecOptRounds(LAMutexSpecOptWorkflow):\n \n prefix = 'results/lamutexspecopt_rounds'\n \n class ExpDatagen(LAMutexSpecOptWorkflow.ExpDatagen):\n def get_dsparams_list(self):\n return [\n dict(\n dsid = str(x),\n x = x,\n \n n_procs = 10,\n n_rounds = x,\n )\n for x in range(3, 30 + 1, 3)\n ]\n \n class ExpExtractor(LAMutexSpecOptWorkflow.ExpExtractor):\n ylabel = 'Running time (in seconds)'\n xlabel = 'Number of rounds'\n xmin = 1\n xmax = 31\n\nclass LAMutexOrigProcs(LAMutexOrigWorkflow):\n \n prefix = 'results/lamutexorig_procs'\n \n class ExpDatagen(LAMutexOrigWorkflow.ExpDatagen):\n \n def get_dsparams_list(self):\n return [\n dict(\n dsid = str(x),\n x = x,\n \n n_procs = x,\n n_rounds = 5,\n )\n for x in range(5, 50 + 1, 5)\n ]\n \n class ExpExtractor(LAMutexOrigWorkflow.ExpExtractor):\n ylabel = 'Running time (in seconds)'\n xlabel = 'Number of processes'\n xmin = 5\n xmax = 55\n\nclass LAMutexOrigRounds(LAMutexOrigWorkflow):\n \n prefix = 'results/lamutexorig_rounds'\n \n class ExpDatagen(LAMutexOrigWorkflow.ExpDatagen):\n \n def get_dsparams_list(self):\n return [\n dict(\n dsid = str(x),\n x = x,\n \n 
n_procs = 5,\n n_rounds = x,\n )\n for x in range(100, 1000 + 1, 100)\n ]\n \n class ExpExtractor(LAMutexOrigWorkflow.ExpExtractor):\n ylabel = 'Running time (in seconds)'\n xlabel = 'Number of rounds'\n xmin = 50\n xmax = 1050\n\n\nclass LAPaxosDriver(DistalgoDriver):\n dafilename = 'lapaxos/lapaxos.da'\n argnames = ['n_prop', 'n_acc', 'n_rounds']\n\nclass LAPaxos(DistalgoWorkflow):\n \n prefix = 'results/lapaxos'\n \n ExpDriver = LAPaxosDriver\n \n class ExpDatagen(DistalgoWorkflow.ExpDatagen):\n \n progs = [\n 'lapaxos_inc_in',\n# 'lapaxos_inc_inc',\n# 'lapaxos_inc_dem',\n ]\n \n def get_dsparams_list(self):\n return [\n dict(\n dsid = str(x),\n x = x,\n \n n_prop = x,\n n_acc = x * 3,\n n_rounds = 3,\n )\n for x in [1]#[1, 3, 6, 9, 12]\n ]\n \n class ExpExtractor(DistalgoWorkflow.ExpExtractor):\n \n name = 'lapaxos'\n noninline = True\n \n ylabel = 'Running time (in seconds)'\n xlabel = 'Number of processes'\n\n\nclass RAMutexDriver(DistalgoDriver):\n dafilename = 'ramutex/ramutex.da'\n argnames = ['n_procs', 'n_rounds']\n\nclass RAMutex(DistalgoWorkflow):\n \n prefix = 'results/ramutex'\n \n ExpDriver = RAMutexDriver\n \n class ExpDatagen(DistalgoWorkflow.ExpDatagen):\n \n progs = [\n 'ramutex_inc_in',\n 'ramutex_inc_inc',\n 'ramutex_inc_dem',\n ]\n \n def get_dsparams_list(self):\n return [\n dict(\n dsid = str(x),\n x = x,\n \n n_procs = x,\n n_rounds = 10,\n )\n for x in range(2, 20 + 1, 2)\n ]\n \n# min_repeats = 5\n# max_repeats = 5\n \n class ExpExtractor(DistalgoWorkflow.ExpExtractor):\n \n name = 'ramutex'\n \n ylabel = 'Running time (in seconds)'\n xlabel = 'Number of processes'\n\n\nclass RATokenDriver(DistalgoDriver):\n dafilename = 'ratoken/ratoken.da'\n argnames = ['n_procs', 'n_rounds']\n\nclass RATokenProcs(DistalgoWorkflow):\n \n prefix = 'results/ratoken'\n \n ExpDriver = RATokenDriver\n \n class ExpDatagen(DistalgoWorkflow.ExpDatagen):\n \n progs = [\n 'ratoken_inc_in',\n 'ratoken_inc_dem',\n ]\n \n def get_dsparams_list(self):\n return [\n dict(\n dsid = str(x),\n x = x,\n \n n_procs = x,\n n_rounds = 10,\n )\n for x in range(10, 70 + 1, 10)\n ]\n \n# min_repeats = 3\n# max_repeats = 3\n \n class ExpExtractor(DistalgoWorkflow.ExpExtractor):\n \n name = 'ratoken'\n \n ylabel = 'Running time (in seconds)'\n xlabel = 'Number of processes'\n\nclass RATokenRounds(DistalgoWorkflow):\n \n prefix = 'results/ratoken_rounds'\n \n ExpDriver = RATokenDriver\n \n class ExpDatagen(DistalgoWorkflow.ExpDatagen):\n \n progs = [\n# 'ratoken_inc_in',\n 'ratoken_inc_dem',\n ]\n \n def get_dsparams_list(self):\n return [\n dict(\n dsid = str(x),\n x = x,\n \n n_procs = 20,\n n_rounds = x,\n )\n for x in range(10, 100 + 1, 10)\n ]\n \n# min_repeats = 3\n# max_repeats = 3\n \n class ExpExtractor(DistalgoWorkflow.ExpExtractor):\n \n name = 'ratoken'\n \n ylabel = 'Running time (in seconds)'\n xlabel = 'Number of rounds'\n\n\nclass SKTokenDriver(DistalgoDriver):\n dafilename = 'sktoken/sktoken.da'\n argnames = ['n_procs', 'n_rounds']\n\nclass SKToken(DistalgoWorkflow):\n \n prefix = 'results/sktoken'\n \n ExpDriver = SKTokenDriver\n \n class ExpDatagen(DistalgoWorkflow.ExpDatagen):\n \n progs = [\n 'sktoken_inc_in',\n 'sktoken_inc_dem',\n ]\n \n def get_dsparams_list(self):\n return [\n dict(\n dsid = str(x),\n x = x,\n \n n_procs = x,\n n_rounds = 10,\n )\n for x in range(5, 40 + 1, 5)\n ]\n \n min_repeats = 3\n max_repeats = 3\n \n class ExpExtractor(DistalgoWorkflow.ExpExtractor):\n \n name = 'sktoken'\n \n ylabel = 'Running time (in seconds)'\n xlabel = 'Number of processes'\n\n\nclass 
TPCommitDriver(DistalgoDriver):\n dafilename = 'tpcommit/tpcommit.da'\n argnames = ['n_procs', 'failrate']\n\nclass TPCommit(DistalgoWorkflow):\n \n prefix = 'results/twophasecommit'\n \n ExpDriver = TPCommitDriver\n \n class ExpDatagen(DistalgoWorkflow.ExpDatagen):\n \n progs = [\n 'tpcommit_inc_in',\n 'tpcommit_inc_inc',\n# 'tpcommit_inc_dem',\n ]\n \n def get_dsparams_list(self):\n return [\n dict(\n dsid = str(x),\n x = x,\n \n n_procs = x,\n failrate = 10,\n )\n for x in range(10, 60 + 1, 10)\n ]\n \n min_repeats = 5\n max_repeats = 5\n \n class ExpExtractor(DistalgoWorkflow.ExpExtractor):\n \n name = 'tpcommit'\n \n ylabel = 'Running time (in seconds)'\n xlabel = 'Number of proposers'\n\n\nclass VRPaxosDriver(DistalgoDriver):\n dafilename = 'vrpaxos/vrpaxos.da'\n argnames = []\n\nclass VRPaxos(DistalgoWorkflow):\n \n prefix = 'results/vrpaxos'\n \n ExpDriver = VRPaxosDriver\n \n class ExpDatagen(DistalgoWorkflow.ExpDatagen):\n \n progs = [\n 'vrpaxos_inc_in',\n 'vrpaxos_inc_dem',\n ]\n \n def get_dsparams_list(self):\n return [\n dict(\n dsid = str(x),\n x = x,\n )\n for x in [1]\n ]\n \n class ExpExtractor(DistalgoWorkflow.ExpExtractor):\n \n name = 'vrpaxos'\n \n ylabel = 'Running time (in seconds)'\n xlabel = 'Number of processes'\n" }, { "alpha_fraction": 0.41287878155708313, "alphanum_fraction": 0.4131701588630676, "avg_line_length": 28.973798751831055, "blob_id": "f81ca3cbc2f43e225ef58febba876060af133563", "content_id": "d36a34be3aa93a7f8d894c425cd11dca17763310", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6864, "license_type": "no_license", "max_line_length": 78, "num_lines": 229, "path": "/incoq/compiler/obj/match.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Pair-relation match operations.\"\"\"\n\n\n__all__ = [\n 'mset_bindmatch',\n 'fset_bindmatch',\n 'mapset_bindmatch',\n]\n\n\nimport incoq.compiler.incast as L\nfrom incoq.compiler.set import Mask\n\nfrom .pairrel import make_maprel\n\n\ndef mset_bindmatch(mask, bvars, uvars, body, *, typecheck):\n \"\"\"Form code for bindmatch over the M-set.\"\"\"\n \n tc_applicable = False\n \n if mask == Mask.BB:\n tc_applicable = True\n cont, item = bvars\n code = L.pc('''\n if ITEM in CONT:\n BODY\n ''', subst={'CONT': L.ln(cont),\n 'ITEM': L.ln(item),\n '<c>BODY': body})\n \n elif mask == Mask.OUT:\n tc_applicable = True\n (cont,) = bvars\n (item,) = uvars\n code = L.pc('''\n for ITEM in CONT:\n BODY\n ''', subst={'CONT': L.ln(cont),\n 'ITEM': L.sn(item),\n '<c>BODY': body})\n \n elif mask == Mask.B1:\n tc_applicable = True\n (cont,) = (item,) = bvars\n code = L.pc('''\n if CONT in CONT:\n BODY\n ''', subst={'CONT': L.ln(cont),\n '<c>BODY': body})\n \n elif mask == Mask.BW:\n tc_applicable = True\n (cont,) = bvars\n code = L.pc('''\n if not CONT.isempty():\n BODY\n ''', subst={'CONT': L.ln(cont),\n '<c>BODY': body})\n \n elif mask == Mask.UU:\n raise AssertionError('No object-domain equivalent for iterating over '\n 'the M-set')\n \n else:\n code = L.pc('''\n for UVARS in setmatch(SET, MASK, BVARS):\n BODY\n ''', subst={'UVARS': L.tuplify(uvars, lval=True),\n 'SET': L.ln('_M'),\n 'MASK': mask.make_node(),\n 'BVARS': L.tuplify(bvars),\n '<c>BODY': body})\n \n if typecheck and tc_applicable:\n code = L.pc('''\n if isinstance(CONT, Set):\n CODE\n ''', subst={'CONT': cont,\n '<c>CODE': code})\n \n return code\n\n\ndef fset_bindmatch(field, mask, bvars, uvars, body, *, typecheck):\n \"\"\"Form code for bindmatch over an F-set.\"\"\"\n \n # If we 
had such a thing as a negated field membership clause,\n # then the negative test for hasattr() would actually conflict\n # with the type check.\n \n tc_applicable = False\n \n if mask == Mask.BB:\n tc_applicable = True\n cont, item = bvars\n code = L.pc('''\n if CONT.FIELD == ITEM:\n BODY\n ''', subst={'CONT': L.ln(cont),\n '@FIELD': field,\n 'ITEM': L.ln(item),\n '<c>BODY': body})\n \n elif mask == Mask.OUT:\n tc_applicable = True\n (cont,) = bvars\n (item,) = uvars\n code = L.pc('''\n ITEM = CONT.FIELD\n BODY\n ''', subst={'CONT': L.ln(cont),\n '@FIELD': field,\n 'ITEM': L.sn(item),\n '<c>BODY': body})\n \n elif mask == Mask.B1:\n tc_applicable = True\n (cont,) = (item,) = bvars\n code = L.pc('''\n if CONT == CONT.FIELD:\n BODY\n ''', subst={'CONT': L.ln(cont),\n '@FIELD': field,\n '<c>BODY': body})\n \n elif mask == Mask.BW:\n # Not applicable because the code and the type check\n # are the same thing.\n tc_applicable = False\n (cont,) = bvars\n code = L.pc('''\n if hasattr(CONT, FIELD):\n BODY\n ''', subst={'CONT': L.ln(cont),\n 'FIELD': L.Str(field),\n '<c>BODY': body})\n \n elif mask == Mask.UU:\n raise AssertionError('No object-domain equivalent for iterating over '\n 'the M-set')\n \n else:\n code = L.pc('''\n for UVARS in setmatch(SET, MASK, BVARS):\n BODY\n ''', subst={'UVARS': L.tuplify(uvars, lval=True),\n 'SET': L.ln('_F_' + field),\n 'MASK': mask.make_node(),\n 'BVARS': L.tuplify(bvars),\n '<c>BODY': body})\n \n if typecheck and tc_applicable:\n code = L.pc('''\n if hasattr(CONT, FIELD):\n CODE\n ''', subst={'CONT': cont,\n 'FIELD': L.Str(field),\n '<c>CODE': code})\n \n return code\n\n\ndef mapset_bindmatch(mask, bvars, uvars, body, *, typecheck):\n \"\"\"Form code for bindmatch over the MAP set.\"\"\"\n \n tc_applicable = False\n \n if mask == Mask('bbb'):\n tc_applicable = True\n map, key, value = bvars\n code = L.pc('''\n if KEY in MAP and MAP[KEY] == VALUE:\n BODY\n ''', subst={'MAP': L.ln(map),\n 'KEY': L.ln(key),\n 'VALUE': L.ln(value),\n '<c>BODY': body})\n \n elif mask == Mask('bbu'):\n tc_applicable = True\n map, key = bvars\n (value,) = uvars\n code = L.pc('''\n if KEY in MAP:\n VALUE = MAP[KEY]\n BODY\n ''', subst={'MAP': L.ln(map),\n 'KEY': L.ln(key),\n 'VALUE': L.sn(value),\n '<c>BODY': body})\n \n elif mask == Mask('buu'):\n tc_applicable = True\n (map,) = bvars\n key, value = uvars\n code = L.pc('''\n for KEY, VALUE in MAP.items():\n BODY\n ''', subst={'MAP': L.ln(map),\n 'KEY': L.sn(key),\n 'VALUE': L.sn(value),\n '<c>BODY': body})\n \n # Other variations involving wildcards and equalities are possible,\n # but for now they're handled by the general auxmap case.\n \n elif mask == Mask('uuu'):\n raise AssertionError('No object-domain equivalent for iterating over '\n 'the MAP set')\n \n else:\n code = L.pc('''\n for UVARS in setmatch(SET, MASK, BVARS):\n BODY\n ''', subst={'UVARS': L.tuplify(uvars, lval=True),\n 'SET': L.ln(make_maprel()),\n 'MASK': mask.make_node(),\n 'BVARS': L.tuplify(bvars),\n '<c>BODY': body})\n \n if typecheck and tc_applicable:\n code = L.pc('''\n if isinstance(MAP, Map):\n CODE\n ''', subst={'MAP': map,\n '<c>CODE': code})\n \n return code\n" }, { "alpha_fraction": 0.5523937344551086, "alphanum_fraction": 0.5734850764274597, "avg_line_length": 31.824174880981445, "blob_id": "ff05676f5e65c7cbaab890373cd6d4df9c384efa", "content_id": "4999f2ade87cae74a95a6131236c2ab1edb51894", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2987, "license_type": "no_license", "max_line_length": 
69, "num_lines": 91, "path": "/incoq/tests/invinc/incast/test_types.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Unit tests for util.py.\"\"\"\n\n\nimport unittest\nfrom simplestruct import Struct\n\nfrom incoq.compiler.incast.types import *\n\n\nclass TypeCase(unittest.TestCase):\n \n def test_frozendict_field(self):\n class A(Struct):\n f = FrozenDictField(int)\n a = A({1: 'a', 2: 'b'})\n \n class A(Struct):\n f = FrozenDictField(int, int)\n with self.assertRaises(TypeError):\n a = A({1: 1, 2: 'b'})\n \n def test_subtypeof_matches(self):\n t1 = SetType(numbertype)\n t2 = SetType(toptype)\n self.assertTrue(t1.issubtype(toptype))\n self.assertTrue(bottomtype.issubtype(t1))\n self.assertTrue(t1.issubtype(t2))\n \n self.assertTrue(t1.matches(t2))\n t3 = TupleType([t1, t2])\n t4 = TupleType([toptype, toptype])\n self.assertTrue(t3.matches(t4))\n self.assertFalse(bottomtype.matches(toptype))\n \n constrs = t3.match_against(t4)\n exp_constrs = [(t1, toptype), (t2, toptype)]\n self.assertEqual(constrs, exp_constrs)\n \n def test_join_types(self):\n t1 = SetType(numbertype)\n t2 = SetType(toptype)\n t3 = ListType(numbertype)\n self.assertEqual(t1.join(t2), t2)\n self.assertEqual(t1.join(t2, inverted=True), t1)\n self.assertEqual(t2.join(t3), toptype)\n \n t4 = DictType(toptype, numbertype)\n t5 = DictType(numbertype, bottomtype)\n t6 = DictType(toptype, numbertype)\n self.assertEqual(t4.join(t5), t6)\n \n def test_expand_types(self):\n t = DictType(SetType(TypeVar('T')),\n TupleType([numbertype, TypeVar('U')]))\n t = t.expand({'T': strtype, 'U': toptype})\n exp_t = DictType(SetType(strtype),\n TupleType([numbertype, toptype]))\n self.assertEqual(t, exp_t)\n \n def test_widen(self):\n t = DictType(SetType(numbertype),\n TupleType([toptype, SetType(toptype)]))\n t1 = t.widen(0)\n self.assertEqual(t1, toptype)\n t2 = t.widen(1)\n self.assertEqual(t2, DictType(toptype, toptype))\n t3 = t.widen(2)\n self.assertEqual(t3, DictType(SetType(toptype),\n TupleType([toptype, toptype])))\n t4 = t.widen(3)\n self.assertEqual(t4, t)\n \n def test_refine(self):\n t1 = RefineType('foo', numbertype)\n self.assertTrue(t1.issubtype(numbertype))\n self.assertEqual(t1.join(numbertype), numbertype)\n t2 = RefineType('bar', numbertype)\n self.assertTrue(t1.join(t2), numbertype)\n self.assertEqual(t1.meet(t2), bottomtype)\n \n t3 = RefineType('baz', SetType(numbertype))\n self.assertEqual(t3.widen(2), SetType(numbertype))\n \n def test_eval_typestr(self):\n t = eval_typestr('SetType(numbertype)')\n exp_t = SetType(numbertype)\n self.assertEqual(t, exp_t)\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.5954926609992981, "alphanum_fraction": 0.5966441631317139, "avg_line_length": 26.017778396606445, "blob_id": "ef69b0de578f8e6c02b969e69b3d672d74af99a1", "content_id": "6735702ed867bc9ef8de0618f9bba24baf1d1424", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6079, "license_type": "no_license", "max_line_length": 70, "num_lines": 225, "path": "/incoq/transform/trans.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Invoke incoq and maintain the stats database.\"\"\"\n\n\n__all__ = [\n 'Task',\n 'run_task',\n 'do_tasks',\n 'make_testprogram_task',\n 'TaskTemplate',\n 'task_from_template',\n \n 'make_in_task',\n 'COM',\n 'AUX',\n 'INC',\n 'INC_SUBDEM',\n 'INC_SUBDEM_OBJ',\n 'DEM',\n 'DEM_LRU',\n 'DEM_INLINE',\n 'DEM_NO_TAG_CHECK',\n 'DEM_SINGLE_TAG',\n 'DEM_NORCELIM',\n 
'DEM_NOTYPECHECK',\n 'DEM_OBJ',\n 'DEM_SUBDEM',\n 'DEM_OBJ_NS',\n]\n\n\nfrom time import clock\nfrom os.path import normpath, relpath, join, splitext\n\nfrom simplestruct import Struct, Field, TypedField\n\nfrom incoq.util.linecount import get_loc_file\nfrom incoq.compiler.incast import print_exc_with_ast\nfrom incoq.compiler.central import transform_file\n\nfrom .statsdb import StatsDB\n\n\nclass Task(Struct):\n \n \"\"\"A single transformation task.\"\"\"\n \n display_name = TypedField(str)\n \"\"\"Display name for status printing.\"\"\"\n input_name = TypedField(str)\n \"\"\"Input filename.\"\"\"\n output_name = Field()\n \"\"\"Output filename, or None if no transformation.\"\"\"\n nopts = Field()\n \"\"\"Normal options.\"\"\"\n qopts = Field()\n \"\"\"Query options.\"\"\"\n\n\ndef run_task(task):\n # Dummy case for generating a stats entry for an input file's LOC.\n if task.output_name is None:\n in_loc = get_loc_file(task.input_name)\n stats = {'lines': in_loc}\n return stats\n \n input_name = normpath(relpath(task.input_name))\n output_name = normpath(relpath(task.output_name))\n print('==== Task {:30}{} -> {} ===='.format(\n task.display_name + ': ', input_name, output_name))\n \n try:\n stats = transform_file(task.input_name, task.output_name,\n nopts=task.nopts, qopts=task.qopts)\n return stats\n except Exception:\n print_exc_with_ast()\n return None\n\n\ndef do_tasks(tasks, path):\n \"\"\"Run a sequence of transformation tasks, updating the\n stats database. Return the time elapsed.\n \"\"\"\n t1 = clock()\n statsdb = StatsDB(path)\n statsdb.load()\n \n for t in tasks:\n cur_stats = run_task(t)\n if cur_stats is not None:\n statsdb.allstats[t.display_name] = cur_stats\n \n statsdb.save()\n t2 = clock()\n return t2 - t1\n\n\ndef make_testprogram_task(prog):\n \"\"\"Make a Task for transforming a test program. prog is the path\n to the program, relative to the incoq/tests/programs directory,\n excluding the '_in.py' suffix.\n \"\"\"\n path = join('incoq/tests/programs', prog)\n nopts = {'verbose': True, 'eol': 'lf'}\n return Task(prog, path + '_in.py', path + '_out.py', nopts, {})\n\n\nclass TaskTemplate:\n \n display_suffix = None\n \"\"\"Suffix appended to display name.\"\"\"\n \n output_suffix = 'out'\n \"\"\"Suffix appended to output file name.\"\"\"\n \n extra_nopts = {}\n \"\"\"nopts to use. Inherited from base classes as well.\"\"\"\n \n extra_qopts = {}\n \"\"\"qopts to use. 
Inherited from base classes as well.\"\"\"\n\ndef task_from_template(task, template):\n display_name = task.display_name\n if template.display_suffix is not None:\n display_name += ' ' + template.display_suffix\n \n output_name = task.output_name\n if template.output_suffix is not None:\n base, ext = splitext(output_name)\n output_name = base + '_' + template.output_suffix + ext\n \n bases = [c for c in template.__mro__\n if issubclass(c, TaskTemplate)]\n nopts = {}\n for c in reversed(bases):\n nopts.update(c.extra_nopts)\n nopts.update(task.nopts)\n \n qopts = {}\n for d in [c.extra_qopts for c in reversed(bases)] + [task.qopts]:\n for q, opts in d.items():\n qopts.setdefault(q, {}).update(opts)\n \n return task._replace(display_name=display_name,\n output_name=output_name,\n nopts=nopts,\n qopts=qopts)\n\n\ndef make_in_task(display_name, base_name):\n return Task(display_name + ' Input',\n base_name + '_in.py',\n None, {}, {})\n\n\nclass COM(TaskTemplate):\n extra_nopts = {'verbose': True,\n 'maint_inline': False,\n 'analyze_costs': True,\n 'selfjoin_strat': 'sub',\n 'default_aggr_halfdemand': True,\n 'autodetect_input_rels': True}\n\nclass AUX(COM):\n output_suffix = 'aux'\n display_suffix = 'Batch w/ maps'\n extra_nopts = {'default_impl': 'auxonly'}\n\nclass INC(COM):\n output_suffix = 'inc'\n display_suffix = 'Unfiltered'\n extra_nopts = {'default_impl': 'inc'}\n\nclass INC_SUBDEM(INC):\n extra_nopts = {'subdem_tags': False}\n\nclass INC_SUBDEM_OBJ(INC_SUBDEM):\n extra_nopts = {'obj_domain': True}\n\nclass DEM(COM):\n output_suffix = 'dem'\n display_suffix = 'Filtered'\n extra_nopts = {'default_impl': 'dem'}\n\nclass DEM_LRU(DEM):\n extra_nopts = {'default_uset_lru': 1}\n\nclass DEM_INLINE(DEM):\n output_suffix = 'dem_inline'\n display_suffix = 'Filtered (inlined)'\n extra_nopts = {'maint_inline': True}\n\nclass DEM_NO_TAG_CHECK(DEM):\n output_suffix = 'dem_notagcheck'\n display_suffix = 'Filtered (no demand checks)'\n extra_nopts = {'tag_checks': False}\n\nclass DEM_SINGLE_TAG(DEM):\n output_suffix = 'dem_singletag'\n display_suffix = 'Filtered (single tag)'\n extra_nopts = {'single_tag': True}\n\nclass DEM_NORCELIM(DEM):\n output_suffix = 'dem_norcelim'\n display_suffix = 'Filtered (no rc elim.)'\n extra_nopts = {'rc_elim': False}\n\nclass DEM_NOTYPECHECK(DEM):\n output_suffix = 'dem_notypecheck'\n display_suffix = 'Filtered (no type checks)'\n extra_nopts = {'maint_emit_typechecks': False}\n\nclass DEM_OBJ(DEM):\n output_suffix = 'dem'\n display_suffix = 'Filtered (obj)'\n extra_nopts = {'obj_domain': True}\n\nclass DEM_SUBDEM(DEM):\n output_suffix = 'dem_subdem'\n display_suffix = 'Filtered (alternate subquery demand)'\n extra_nopts = {'subdem_tags': False}\n\nclass DEM_OBJ_NS(DEM_OBJ):\n extra_nopts = {'nonstrict_fields': True,\n 'nonstrict_maps': True}\n" }, { "alpha_fraction": 0.4652862250804901, "alphanum_fraction": 0.4823386073112488, "avg_line_length": 24.65625, "blob_id": "b2e49c9d4b9b84975355a4285075cc29d5d6cdf9", "content_id": "0b2c87b5a1a8842d4e5f034fd443a89e7a650d8f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 821, "license_type": "no_license", "max_line_length": 60, "num_lines": 32, "path": "/setup.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from setuptools import setup\n\nsetup(\n name = 'IncOQ',\n version = '0.2.0-dev',\n url = 'https://github.com/IncOQ/incoq',\n \n author = 'Jon Brandvein',\n author_email = 'jon.brandvein@gmail.com',\n# license = ...,\n description = 'A system 
for compiling queries into ' \\\n 'incremental demand-driven code',\n \n classifiers = [\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Developers',\n 'Programming Language :: Python :: 3',\n 'Topic :: Software Development :: Compilers',\n ],\n \n packages = ['incoq'],\n \n test_suite = 'incoq.tests',\n \n install_requires = [\n 'simplestruct >=0.2.1',\n 'iast >=0.2.1',\n ],\n extras_require = {\n 'aggr': ['bintrees >=2.0.1'],\n }\n)\n" }, { "alpha_fraction": 0.5742971897125244, "alphanum_fraction": 0.6104417443275452, "avg_line_length": 13.647058486938477, "blob_id": "077904ad990b70e155f433152be4e824bc9e7fef", "content_id": "83f8ed90910dbf07d952524c2858516227108e43", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 249, "license_type": "no_license", "max_line_length": 36, "num_lines": 17, "path": "/incoq/tests/programs/aggr/rewrite_in.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "# Aggregate rewriting.\n\nfrom incoq.runtime import *\n\nOPTIONS(\n default_impl = 'inc',\n obj_domain = True,\n)\n\nR = Set()\nR.add(1)\nprint(sum(R))\n\n# These forms don't get transformed.\nprint(sum({1, 2}))\nprint(sum([1, 2]))\nprint(sum(({1:1}[1], 2)))\n" }, { "alpha_fraction": 0.42729488015174866, "alphanum_fraction": 0.5, "avg_line_length": 35.761192321777344, "blob_id": "dedbf1b9865c7bd533a844219a3a9a4a9be13bdd", "content_id": "2594260af11c80c69befc28df070cb28037d0bb6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2462, "license_type": "no_license", "max_line_length": 127, "num_lines": 67, "path": "/incoq/tests/programs/comp/deltawildeq_out.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from incoq.runtime import *\n# Comp1 := {(x, w) : (x, x, _) in S, (x, w) in T}\n_m_T_out = Map()\ndef _maint__m_T_out_add(_e):\n (v7_1, v7_2) = _e\n if (v7_1 not in _m_T_out):\n _m_T_out[v7_1] = set()\n _m_T_out[v7_1].add(v7_2)\n\ndef _maint__m_T_out_remove(_e):\n (v8_1, v8_2) = _e\n _m_T_out[v8_1].remove(v8_2)\n if (len(_m_T_out[v8_1]) == 0):\n del _m_T_out[v8_1]\n\n_m_S_b1w = Map()\ndef _maint__m_S_b1w_add(_e):\n (v5_1, v5_2, v5_3) = _e\n if ((v5_1 == v5_2)):\n if (v5_1 not in _m_S_b1w):\n _m_S_b1w[v5_1] = RCSet()\n if (() not in _m_S_b1w[v5_1]):\n _m_S_b1w[v5_1].add(())\n else:\n _m_S_b1w[v5_1].incref(())\n\nComp1 = RCSet()\ndef _maint_Comp1_S_add(_e):\n # Iterate {(v1_x, v1_w) : (v1_x, v1_x, _) in deltamatch(S, 'b1w', _e, 1), (v1_x, v1_w) in T}\n for v1_x in setmatch(({_e} if ((_m_S_b1w[_e[0]] if (_e[0] in _m_S_b1w) else RCSet()).getref(()) == 1) else {}), 'u1w', ()):\n for v1_w in (_m_T_out[v1_x] if (v1_x in _m_T_out) else set()):\n Comp1.add((v1_x, v1_w))\n\ndef _maint_Comp1_T_add(_e):\n # Iterate {(v3_x, v3_w) : (v3_x, v3_x, _) in S, (v3_x, v3_w) in deltamatch(T, 'bb', _e, 1)}\n (v3_x, v3_w) = _e\n for _ in (_m_S_b1w[v3_x] if (v3_x in _m_S_b1w) else RCSet()):\n Comp1.add((v3_x, v3_w))\n\ndef _maint_Comp1_T_remove(_e):\n # Iterate {(v4_x, v4_w) : (v4_x, v4_x, _) in S, (v4_x, v4_w) in deltamatch(T, 'bb', _e, 1)}\n (v4_x, v4_w) = _e\n for _ in (_m_S_b1w[v4_x] if (v4_x in _m_S_b1w) else RCSet()):\n Comp1.remove((v4_x, v4_w))\n\nfor (v1, v2) in [(1, 3)]:\n # Begin maint _m_T_out after \"T.add((v1, v2))\"\n _maint__m_T_out_add((v1, v2))\n # End maint _m_T_out after \"T.add((v1, v2))\"\n # Begin maint Comp1 after \"T.add((v1, v2))\"\n _maint_Comp1_T_add((v1, v2))\n # End maint Comp1 after \"T.add((v1, v2))\"\nfor (v1, v2, v3) in [(1, 2, 2), (1, 1, 
2)]:\n # Begin maint _m_S_b1w after \"S.add((v1, v2, v3))\"\n _maint__m_S_b1w_add((v1, v2, v3))\n # End maint _m_S_b1w after \"S.add((v1, v2, v3))\"\n # Begin maint Comp1 after \"S.add((v1, v2, v3))\"\n _maint_Comp1_S_add((v1, v2, v3))\n # End maint Comp1 after \"S.add((v1, v2, v3))\"\nprint(sorted(Comp1))\n# Begin maint Comp1 before \"T.remove((1, 3))\"\n_maint_Comp1_T_remove((1, 3))\n# End maint Comp1 before \"T.remove((1, 3))\"\n# Begin maint _m_T_out before \"T.remove((1, 3))\"\n_maint__m_T_out_remove((1, 3))\n# End maint _m_T_out before \"T.remove((1, 3))\"\nprint(sorted(Comp1))" }, { "alpha_fraction": 0.5949820876121521, "alphanum_fraction": 0.6093189716339111, "avg_line_length": 16.4375, "blob_id": "b7dff0ea2f6e427d89473202615c5ca0669befce", "content_id": "bf3f115046eeb92d94b81588da7eedd0c81b76cf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 279, "license_type": "no_license", "max_line_length": 59, "num_lines": 16, "path": "/incoq/tests/programs/comp/uset/auto_in.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "# Comprehension with non-enumvar parameter, which should be\n# automatically added to a U-set.\n\nfrom incoq.runtime import *\n\nE = Set()\ng = 1\n\nfor z in [1, 2, 3]:\n E.add(z)\n\nQUERYOPTIONS(\n '{x for x in E if x > g}',\n impl = 'inc',\n)\nprint(sorted({x for x in E if x > g}))\n" }, { "alpha_fraction": 0.6005491614341736, "alphanum_fraction": 0.6010721921920776, "avg_line_length": 32.10822677612305, "blob_id": "207912eabac39e3cc3954d185f83f4fe56e3199d", "content_id": "2922ca379d54f62b8f20d6e2bcd0727f02526308", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7648, "license_type": "no_license", "max_line_length": 72, "num_lines": 231, "path": "/incoq/compiler/incast/inline.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Inlining transformation.\"\"\"\n\n\n__all__ = [\n 'PlainFunctionFinder',\n 'FunctionInfoGetter',\n 'CallInliner',\n 'inline_functions',\n]\n\n\nfrom incoq.util.collections import SetDict, OrderedSet\nfrom incoq.util.topsort import topsort, get_cycle\n\nfrom .nodes import *\nfrom .structconv import NodeVisitor, NodeTransformer, Templater\nfrom .helpers import (is_plainfuncdef, get_plainfuncdef,\n is_plaincall, get_plaincall)\nfrom .util import FuncEliminator, N\n\n\nclass PlainFunctionFinder(NodeVisitor):\n \n \"\"\"Return all names of top-level functions that only use plain\n arguments and calls. Non-top-level function definitions are not\n analyzed. Calls of non-Name nodes are not analyzed. 
It is an\n error for the functions to have multiple definitions.\n \n If stmt_only is True, functions that are called in expression\n context are excluded.\n \"\"\"\n \n def __init__(self, *, stmt_only):\n super().__init__()\n self.stmt_only = stmt_only\n \n def process(self, tree):\n self.toplevel_funcs = OrderedSet()\n self.excluded_funcs = set()\n \n self.infunc = False\n super().process(tree)\n assert not self.infunc\n \n return self.toplevel_funcs - self.excluded_funcs\n \n def visit_FunctionDef(self, node):\n if self.infunc:\n return\n \n name = node.name\n assert name not in self.toplevel_funcs, \\\n 'Multiple definitions of function ' + name\n self.toplevel_funcs.add(name)\n \n if not is_plainfuncdef(node):\n self.excluded_funcs.add(name)\n \n self.infunc = True\n self.generic_visit(node)\n self.infunc = False\n \n def visit_Expr(self, node):\n if self.stmt_only and isinstance(node.value, Call):\n # Treat Call nodes specially by directly calling\n # generic_visit() on them, bypassing the visit_Call()\n # behavior that would mark it as a bad call.\n self.generic_visit(node.value)\n else:\n # Otherwise just recurse as normal.\n self.visit(node.value)\n \n def visit_Call(self, node):\n if not isinstance(node.func, Name):\n self.generic_visit(node)\n return\n name = node.func.id\n \n if self.stmt_only:\n # We only get here if this call occurred in expression\n # context.\n self.excluded_funcs.add(name)\n else:\n if not is_plaincall(node):\n self.excluded_funcs.add(name)\n \n self.generic_visit(node)\n \n def visit_DemQuery(self, node):\n # For our purposes here, these are interpreted as calls in\n # expression context.\n name = N.queryfunc(node.demname)\n if self.stmt_only:\n self.excluded_funcs.add(name)\n \n self.generic_visit(node)\n\n\nclass FunctionInfoGetter(NodeVisitor):\n \n \"\"\"Builds structural information about the given set of functions,\n where each function satisfies the requirements given above for\n PlainFunctionFinder. 
Returns the following:\n \n 1) a mapping from function name to tuple of parameters\n \n 2) a mapping from function name to body code\n \n 3) an edge relation for a call graph, holding (x, y) if\n function x calls function y and both x and y are in\n the given set of function names\n \n 4) a topological sorting of the functions (fewest dependencies\n first) if one exists, or else None\n \n If require_nonrecursive is True, raise an error if a topological\n sorting can't be generated due to recursion.\n \"\"\" \n \n def __init__(self, funcs, *, require_nonrecursive):\n super().__init__()\n self.funcs = funcs\n self.require_nonrecursive = require_nonrecursive\n \n def process(self, tree):\n self.param_map = {}\n self.body_map = {}\n self.adj_map = SetDict()\n \n self.current_func = None\n super().process(tree)\n assert self.current_func is None\n \n edges = {(x, y) for x in self.adj_map for y in self.adj_map[x]}\n order = topsort(self.funcs, edges)\n if order is None and self.require_nonrecursive:\n raise AssertionError('Recursive functions found: ' +\n str(get_cycle(self.funcs, edges)))\n if order is not None:\n order.reverse()\n \n return self.param_map, self.body_map, edges, order\n \n def visit_FunctionDef(self, node):\n if self.current_func is not None:\n return\n \n name = node.name\n if name not in self.funcs:\n return\n \n self.param_map[name] = tuple(a.arg for a in node.args.args)\n self.body_map[name] = node.body\n \n self.current_func = name\n self.generic_visit(node)\n self.current_func = None\n \n def visit_Call(self, node):\n if self.current_func is not None and is_plaincall(node):\n name, _args = get_plaincall(node)\n if self.current_func in self.funcs and name in self.funcs:\n self.adj_map[self.current_func].add(name)\n \n self.generic_visit(node)\n \n def visit_DemQuery(self, node):\n name = N.queryfunc(node.demname)\n if self.current_func is not None:\n if self.current_func in self.funcs and name in self.funcs:\n self.adj_map[self.current_func].add(name)\n \n self.generic_visit(node)\n\n\nclass CallInliner(NodeTransformer):\n \n \"\"\"Replace statement-level calls in a block of code according to\n the given body_map. Occurrences of formal parameters in the body_map\n get replaced with the actual parameter expressions of the call.\n \"\"\"\n \n def __init__(self, param_map, body_map):\n super().__init__()\n self.param_map = param_map\n self.body_map = body_map\n \n def visit_Expr(self, node):\n if not is_plaincall(node.value):\n return None\n name, args = get_plaincall(node.value)\n \n if name in self.body_map:\n body = self.body_map[name]\n params = self.param_map[name]\n subst = dict(zip(params, args))\n body = Templater.run(body, subst)\n return body\n else:\n return None\n\n\ndef inline_functions(tree, funcs):\n \"\"\"Inline the given set of functions, removing their definitions.\n Each function must satisfy the requirements of PlainFunctionFinder\n above.\n \"\"\"\n found_funcs = PlainFunctionFinder.run(tree, stmt_only=True)\n if not set(funcs).issubset(found_funcs):\n bad_funcs = set(found_funcs).difference(funcs)\n raise AssertionError('Cannot inline functions: ' +\n ', '.join(bad_funcs))\n \n param_map, body_map, edges, order = FunctionInfoGetter.run(\n tree, funcs, require_nonrecursive=True)\n \n # In topological order, pick a function and expand the code\n # listed in its body_map. 
The order ensures that the substitution\n # map already contains entries for all called functions that are\n # to be inlined.\n for f in order:\n new_body = body_map[f]\n new_body = CallInliner.run(new_body, param_map, body_map)\n body_map[f] = new_body\n \n # Delete the function definitions so we don't need to process\n # them in the following expansion step.\n tree = FuncEliminator.run(tree, lambda n: n in funcs)\n \n tree = CallInliner.run(tree, param_map, body_map)\n return tree\n" }, { "alpha_fraction": 0.5271317958831787, "alphanum_fraction": 0.5374677181243896, "avg_line_length": 14.479999542236328, "blob_id": "a81a9d8813e35672a40d8aaaa00ca860de1d330a", "content_id": "2f253479642b79b0d1111f08a059ab411392f859", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 387, "license_type": "no_license", "max_line_length": 38, "num_lines": 25, "path": "/incoq/tests/programs/comp/types_in.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "# Test conversion to runtimelib types.\n\nfrom incoq.runtime import *\n\nQUERYOPTIONS(\n '{x.a for x in S}',\n impl = 'inc',\n)\nQUERYOPTIONS(\n '{x.b for x in S}',\n impl = 'batch',\n)\n\nclass T:\n def __init__(self, v):\n self.a = v\n self.b = 0\n\nS = set()\nfor elem in {T(1), T(2), T(3)}:\n S.add(elem)\n\nprint(sorted({x.a for x in S}))\n\nprint(sorted({x.b for x in S}))\n" }, { "alpha_fraction": 0.4440322518348694, "alphanum_fraction": 0.522834837436676, "avg_line_length": 40.36507797241211, "blob_id": "147f91cb5be2fe0523131921678c4329ef1286bb", "content_id": "2b4fb0576a7acfb69cb771b319427b00756535f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7817, "license_type": "no_license", "max_line_length": 112, "num_lines": 189, "path": "/incoq/tests/programs/aggr/uset_out.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from incoq.runtime import *\n# Comp1 := {(x, L, y) : L in _U_Comp1, (x, y) in E, (y < L)}\n# Aggr1 := sum(DEMQUERY(Comp1, [L], setmatch(Comp1, 'bbu', (x, L))), None)\n_m_Comp1_bbu = Map()\ndef _maint__m_Comp1_bbu_add(_e):\n (v11_1, v11_2, v11_3) = _e\n if ((v11_1, v11_2) not in _m_Comp1_bbu):\n _m_Comp1_bbu[(v11_1, v11_2)] = set()\n _m_Comp1_bbu[(v11_1, v11_2)].add(v11_3)\n\ndef _maint__m_Comp1_bbu_remove(_e):\n (v12_1, v12_2, v12_3) = _e\n _m_Comp1_bbu[(v12_1, v12_2)].remove(v12_3)\n if (len(_m_Comp1_bbu[(v12_1, v12_2)]) == 0):\n del _m_Comp1_bbu[(v12_1, v12_2)]\n\n_m_Aggr1_bbu = Map()\ndef _maint__m_Aggr1_bbu_add(_e):\n (v9_1, v9_2, v9_3) = _e\n if ((v9_1, v9_2) not in _m_Aggr1_bbu):\n _m_Aggr1_bbu[(v9_1, v9_2)] = set()\n _m_Aggr1_bbu[(v9_1, v9_2)].add(v9_3)\n\ndef _maint__m_Aggr1_bbu_remove(_e):\n (v10_1, v10_2, v10_3) = _e\n _m_Aggr1_bbu[(v10_1, v10_2)].remove(v10_3)\n if (len(_m_Aggr1_bbu[(v10_1, v10_2)]) == 0):\n del _m_Aggr1_bbu[(v10_1, v10_2)]\n\ndef _maint_Aggr1_add(_e):\n (v5_v1, v5_v2, v5_v3) = _e\n if ((v5_v1, v5_v2) in _U_Aggr1):\n v5_val = _m_Aggr1_bbu.singlelookup((v5_v1, v5_v2))\n v5_val = (v5_val + v5_v3)\n (v5_1, v5_2) = (v5_v1, v5_v2)\n v5_elem = _m_Aggr1_bbu.singlelookup((v5_v1, v5_v2))\n # Begin maint _m_Aggr1_bbu before \"Aggr1.remove((v5_1, v5_2, v5_elem))\"\n _maint__m_Aggr1_bbu_remove((v5_1, v5_2, v5_elem))\n # End maint _m_Aggr1_bbu before \"Aggr1.remove((v5_1, v5_2, v5_elem))\"\n # Begin maint _m_Aggr1_bbu after \"Aggr1.add((v5_1, v5_2, v5_val))\"\n _maint__m_Aggr1_bbu_add((v5_1, v5_2, v5_val))\n # End maint _m_Aggr1_bbu after \"Aggr1.add((v5_1, v5_2, 
v5_val))\"\n\ndef _maint_Aggr1_remove(_e):\n (v6_v1, v6_v2, v6_v3) = _e\n if ((v6_v1, v6_v2) in _U_Aggr1):\n v6_val = _m_Aggr1_bbu.singlelookup((v6_v1, v6_v2))\n v6_val = (v6_val - v6_v3)\n (v6_1, v6_2) = (v6_v1, v6_v2)\n v6_elem = _m_Aggr1_bbu.singlelookup((v6_v1, v6_v2))\n # Begin maint _m_Aggr1_bbu before \"Aggr1.remove((v6_1, v6_2, v6_elem))\"\n _maint__m_Aggr1_bbu_remove((v6_1, v6_2, v6_elem))\n # End maint _m_Aggr1_bbu before \"Aggr1.remove((v6_1, v6_2, v6_elem))\"\n # Begin maint _m_Aggr1_bbu after \"Aggr1.add((v6_1, v6_2, v6_val))\"\n _maint__m_Aggr1_bbu_add((v6_1, v6_2, v6_val))\n # End maint _m_Aggr1_bbu after \"Aggr1.add((v6_1, v6_2, v6_val))\"\n\n_U_Aggr1 = RCSet()\n_UEXT_Aggr1 = Set()\ndef demand_Aggr1(x, L):\n \"sum(DEMQUERY(Comp1, [L], setmatch(Comp1, 'bbu', (x, L))), None)\"\n if ((x, L) not in _U_Aggr1):\n _U_Aggr1.add((x, L))\n # Begin maint Aggr1 after \"_U_Aggr1.add((x, L))\"\n v7_val = 0\n for v7_elem in (_m_Comp1_bbu[(x, L)] if ((x, L) in _m_Comp1_bbu) else set()):\n v7_val = (v7_val + v7_elem)\n (v7_1, v7_2) = (x, L)\n # Begin maint _m_Aggr1_bbu after \"Aggr1.add((v7_1, v7_2, v7_val))\"\n _maint__m_Aggr1_bbu_add((v7_1, v7_2, v7_val))\n # End maint _m_Aggr1_bbu after \"Aggr1.add((v7_1, v7_2, v7_val))\"\n demand_Comp1(L)\n # End maint Aggr1 after \"_U_Aggr1.add((x, L))\"\n else:\n _U_Aggr1.incref((x, L))\n\ndef undemand_Aggr1(x, L):\n \"sum(DEMQUERY(Comp1, [L], setmatch(Comp1, 'bbu', (x, L))), None)\"\n if (_U_Aggr1.getref((x, L)) == 1):\n # Begin maint Aggr1 before \"_U_Aggr1.remove((x, L))\"\n undemand_Comp1(L)\n (v8_1, v8_2) = (x, L)\n v8_elem = _m_Aggr1_bbu.singlelookup((x, L))\n # Begin maint _m_Aggr1_bbu before \"Aggr1.remove((v8_1, v8_2, v8_elem))\"\n _maint__m_Aggr1_bbu_remove((v8_1, v8_2, v8_elem))\n # End maint _m_Aggr1_bbu before \"Aggr1.remove((v8_1, v8_2, v8_elem))\"\n # End maint Aggr1 before \"_U_Aggr1.remove((x, L))\"\n _U_Aggr1.remove((x, L))\n else:\n _U_Aggr1.decref((x, L))\n\ndef query_Aggr1(x, L):\n \"sum(DEMQUERY(Comp1, [L], setmatch(Comp1, 'bbu', (x, L))), None)\"\n if ((x, L) not in _UEXT_Aggr1):\n _UEXT_Aggr1.add((x, L))\n demand_Aggr1(x, L)\n return True\n\ndef _maint_Comp1__U_Comp1_add(_e):\n # Iterate {(v1_L, v1_x, v1_y) : v1_L in deltamatch(_U_Comp1, 'b', _e, 1), (v1_x, v1_y) in E, (v1_y < v1_L)}\n v1_L = _e\n for (v1_x, v1_y) in E:\n if (v1_y < v1_L):\n # Begin maint _m_Comp1_bbu after \"Comp1.add((v1_x, v1_L, v1_y))\"\n _maint__m_Comp1_bbu_add((v1_x, v1_L, v1_y))\n # End maint _m_Comp1_bbu after \"Comp1.add((v1_x, v1_L, v1_y))\"\n # Begin maint Aggr1 after \"Comp1.add((v1_x, v1_L, v1_y))\"\n _maint_Aggr1_add((v1_x, v1_L, v1_y))\n # End maint Aggr1 after \"Comp1.add((v1_x, v1_L, v1_y))\"\n\ndef _maint_Comp1__U_Comp1_remove(_e):\n # Iterate {(v2_L, v2_x, v2_y) : v2_L in deltamatch(_U_Comp1, 'b', _e, 1), (v2_x, v2_y) in E, (v2_y < v2_L)}\n v2_L = _e\n for (v2_x, v2_y) in E:\n if (v2_y < v2_L):\n # Begin maint Aggr1 before \"Comp1.remove((v2_x, v2_L, v2_y))\"\n _maint_Aggr1_remove((v2_x, v2_L, v2_y))\n # End maint Aggr1 before \"Comp1.remove((v2_x, v2_L, v2_y))\"\n # Begin maint _m_Comp1_bbu before \"Comp1.remove((v2_x, v2_L, v2_y))\"\n _maint__m_Comp1_bbu_remove((v2_x, v2_L, v2_y))\n # End maint _m_Comp1_bbu before \"Comp1.remove((v2_x, v2_L, v2_y))\"\n\ndef _maint_Comp1_E_add(_e):\n # Iterate {(v3_L, v3_x, v3_y) : v3_L in _U_Comp1, (v3_x, v3_y) in deltamatch(E, 'bb', _e, 1), (v3_y < v3_L)}\n (v3_x, v3_y) = _e\n for v3_L in _U_Comp1:\n if (v3_y < v3_L):\n # Begin maint _m_Comp1_bbu after \"Comp1.add((v3_x, v3_L, v3_y))\"\n 
_maint__m_Comp1_bbu_add((v3_x, v3_L, v3_y))\n # End maint _m_Comp1_bbu after \"Comp1.add((v3_x, v3_L, v3_y))\"\n # Begin maint Aggr1 after \"Comp1.add((v3_x, v3_L, v3_y))\"\n _maint_Aggr1_add((v3_x, v3_L, v3_y))\n # End maint Aggr1 after \"Comp1.add((v3_x, v3_L, v3_y))\"\n\ndef _maint_Comp1_E_remove(_e):\n # Iterate {(v4_L, v4_x, v4_y) : v4_L in _U_Comp1, (v4_x, v4_y) in deltamatch(E, 'bb', _e, 1), (v4_y < v4_L)}\n (v4_x, v4_y) = _e\n for v4_L in _U_Comp1:\n if (v4_y < v4_L):\n # Begin maint Aggr1 before \"Comp1.remove((v4_x, v4_L, v4_y))\"\n _maint_Aggr1_remove((v4_x, v4_L, v4_y))\n # End maint Aggr1 before \"Comp1.remove((v4_x, v4_L, v4_y))\"\n # Begin maint _m_Comp1_bbu before \"Comp1.remove((v4_x, v4_L, v4_y))\"\n _maint__m_Comp1_bbu_remove((v4_x, v4_L, v4_y))\n # End maint _m_Comp1_bbu before \"Comp1.remove((v4_x, v4_L, v4_y))\"\n\n_U_Comp1 = RCSet()\n_UEXT_Comp1 = Set()\ndef demand_Comp1(L):\n '{(x, L, y) : L in _U_Comp1, (x, y) in E, (y < L)}'\n if (L not in _U_Comp1):\n _U_Comp1.add(L)\n # Begin maint Comp1 after \"_U_Comp1.add(L)\"\n _maint_Comp1__U_Comp1_add(L)\n # End maint Comp1 after \"_U_Comp1.add(L)\"\n else:\n _U_Comp1.incref(L)\n\ndef undemand_Comp1(L):\n '{(x, L, y) : L in _U_Comp1, (x, y) in E, (y < L)}'\n if (_U_Comp1.getref(L) == 1):\n # Begin maint Comp1 before \"_U_Comp1.remove(L)\"\n _maint_Comp1__U_Comp1_remove(L)\n # End maint Comp1 before \"_U_Comp1.remove(L)\"\n _U_Comp1.remove(L)\n else:\n _U_Comp1.decref(L)\n\ndef query_Comp1(L):\n '{(x, L, y) : L in _U_Comp1, (x, y) in E, (y < L)}'\n if (L not in _UEXT_Comp1):\n _UEXT_Comp1.add(L)\n demand_Comp1(L)\n return True\n\nE = Set()\nfor e in [(1, 2), (1, 3), (1, 15), (2, 4)]:\n E.add(e)\n # Begin maint Comp1 after \"E.add(e)\"\n _maint_Comp1_E_add(e)\n # End maint Comp1 after \"E.add(e)\"\nL = 10\nx = 1\nprint((query_Aggr1(x, L) and _m_Aggr1_bbu.singlelookup((x, L))))\n# Begin maint Comp1 before \"E.remove((1, 3))\"\n_maint_Comp1_E_remove((1, 3))\n# End maint Comp1 before \"E.remove((1, 3))\"\nE.remove((1, 3))\nprint((query_Aggr1(x, L) and _m_Aggr1_bbu.singlelookup((x, L))))" }, { "alpha_fraction": 0.47560974955558777, "alphanum_fraction": 0.5284552574157715, "avg_line_length": 15.399999618530273, "blob_id": "d39fbf6caf80d53a03a4174e9391f7883ff81152", "content_id": "1f94c1250996ab8e8fd52c9acaabd20b080b7c16", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 246, "license_type": "no_license", "max_line_length": 71, "num_lines": 15, "path": "/incoq/tests/programs/aggr/comp_in.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "# Aggregate of a comprehension.\n\nfrom incoq.runtime import *\n\nOPTIONS(\n default_impl = 'inc',\n)\n\nE = Set()\n\nfor e in [(1, 2), (2, 3), (2, 4), (3, 5)]:\n E.add(e)\n\nx = 1\nprint(sum({z for (x2, y) in E for (y2, z) in E if x == x2 if y == y2}))\n" }, { "alpha_fraction": 0.5132743120193481, "alphanum_fraction": 0.5575221180915833, "avg_line_length": 9.7619047164917, "blob_id": "fa766534b6785bc85291f05e9bddecff038188e1", "content_id": "04f28ae4bf654dc3b2da84d981d296dafcee03e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 226, "license_type": "no_license", "max_line_length": 27, "num_lines": 21, "path": "/incoq/tests/programs/aggr/basic_in.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "# Basic aggregate query.\n\nfrom incoq.runtime import *\n\nOPTIONS(\n default_impl = 'inc',\n)\n\nR = Set()\n\nfor x in [1, 2, 3, 4, 5]:\n 
R.add(x)\n\nR.remove(5)\n\nprint(sum(R))\n\nfor x in [1, 2, 3, 4]:\n R.remove(x)\n\nprint(sum(R))\n" }, { "alpha_fraction": 0.6139705777168274, "alphanum_fraction": 0.625, "avg_line_length": 16, "blob_id": "c6d1e05efad75bc3d6b786317dee4b4b25bdc13b", "content_id": "55ee0a08fee3077efab49f8317e77a1591fb54f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 272, "license_type": "no_license", "max_line_length": 59, "num_lines": 16, "path": "/incoq/tests/programs/comp/unhandled_in.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "# Programs with comprehensions that we don't handle.\n\nfrom incoq.runtime import *\n\nOPTIONS(\n default_impl = 'inc',\n)\n\nN = Set()\n\nfor i in range(3):\n N.add(i)\n\nprint(sorted({(x, y) for x in N for y in N}))\n\nprint(sorted({(x, y) for x in range(3) for y in range(3)}))\n" }, { "alpha_fraction": 0.5711422562599182, "alphanum_fraction": 0.6192384958267212, "avg_line_length": 20.69565200805664, "blob_id": "fa44a496efab5093c2286d796f8e089bf799aef9", "content_id": "20eb304a8a140990412ff42bd232adf9d71bae80", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 499, "license_type": "no_license", "max_line_length": 56, "num_lines": 23, "path": "/incoq/runtime/benchmark.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Benchmark a few of the runtimelib types in comparison\nwith native Python types.\n\"\"\"\n\n\nfrom time import clock\nfrom runtimelib import Set\n\n\ndef run(settype, N, trials):\n s = settype()\n t1 = clock()\n for _ in range(trials):\n for x in range(N):\n s.add(5)\n s.remove(5)\n t2 = clock()\n return (t2 - t1) / trials\n\nnative_time = run(set, 300000, 50)\nruntimelib_time = run(Set, 300000, 50)\nprint(format(native_time, '.6f'))\nprint(format(runtimelib_time, '.6f'))\n" }, { "alpha_fraction": 0.7635658979415894, "alphanum_fraction": 0.7635658979415894, "avg_line_length": 31.25, "blob_id": "15526581ccc9a28009cb756952acbeffd5380d69", "content_id": "8ae175da170c8d281b3a25ca4f19ce5b4a41fbd5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 258, "license_type": "no_license", "max_line_length": 65, "num_lines": 8, "path": "/experiments/twitter/twitter_orig.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "# Batch computation like twitter_in, but define the non-demanding\n# query function to be a mere alias to the query function.\n# This avoids the tiny overhead of calling the NODEMAND no-op at\n# runtime.\n\nfrom .twitter_in import *\n\ndo_query_nodemand = do_query\n" }, { "alpha_fraction": 0.4301075339317322, "alphanum_fraction": 0.4516128897666931, "avg_line_length": 18.964284896850586, "blob_id": "c212ca5d3d5182fbb24b3cb7baec3b198b72e39e", "content_id": "4afeda573f92ee335159aee13a7e6ac6e8ede1cb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 558, "license_type": "no_license", "max_line_length": 49, "num_lines": 28, "path": "/incoq/tests/programs/objcomp/auxonly_out.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from incoq.runtime import *\ndef query_Comp1(s):\n 's -> {o_i : (s, o) in _M, (o, o_i) in _F_i}'\n result = set()\n if isinstance(s, Set):\n for o in s:\n if hasattr(o, 'i'):\n o_i = o.i\n if (o_i not in result):\n result.add(o_i)\n return result\n\nN = Set()\nfor i in range(1, 5):\n N._add(i)\ns1 = Set()\ns2 = Set()\nfor i in N:\n o = 
Obj()\n o.i = i\n if (i % 2):\n s1.add(o)\n else:\n s2.add(o)\ns = s1\nprint(sorted(query_Comp1(s)))\ns = s2\nprint(sorted(query_Comp1(s)))" }, { "alpha_fraction": 0.7222222089767456, "alphanum_fraction": 0.7222222089767456, "avg_line_length": 26, "blob_id": "16cab0ee5ecc916c2579c03e90bf35541b10419d", "content_id": "4a1e82a183eec2be828b669665e2c3a81f23f8d2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 54, "license_type": "no_license", "max_line_length": 26, "num_lines": 2, "path": "/experiments/jql/__init__.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from .java_bridge import *\nfrom .run_jql_exp import *\n" }, { "alpha_fraction": 0.42799460887908936, "alphanum_fraction": 0.42853298783302307, "avg_line_length": 27.576923370361328, "blob_id": "ec7077807c9c086c1836c514af162b48c51d2580", "content_id": "78b24aa6661846853546c3c7db04f6d875f56d8f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3715, "license_type": "no_license", "max_line_length": 73, "num_lines": 130, "path": "/incoq/tests/invinc/incast/test_inline.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Unit tests for inline.py.\"\"\"\n\n\nimport unittest\n\nfrom incoq.compiler.incast.inline import *\nfrom incoq.compiler.incast.structconv import parse_structast\nfrom incoq.compiler.incast.nodeconv import IncLangImporter\n\n\nclass InlineCase(unittest.TestCase):\n \n def p(self, source, subst=None, mode=None):\n return IncLangImporter.run(\n parse_structast(source, mode=mode, subst=subst))\n \n def pc(self, source, **kargs):\n return self.p(source, mode='code', **kargs)\n \n def ps(self, source, **kargs):\n return self.p(source, mode='stmt', **kargs)\n \n def pe(self, source, **kargs):\n return self.p(source, mode='expr', **kargs)\n \n def test_finder(self):\n code = self.pc('''\n def f(x):\n g(x)\n def g(y):\n print(h(y))\n def h(z):\n return w\n def m(k=1):\n pass\n f(a)\n ''')\n \n funcs = PlainFunctionFinder.run(code, stmt_only=False)\n exp_funcs = ['f', 'g', 'h']\n self.assertSequenceEqual(funcs, exp_funcs)\n \n funcs = PlainFunctionFinder.run(code, stmt_only=True)\n exp_funcs = ['f', 'g']\n self.assertSequenceEqual(funcs, exp_funcs)\n \n code = self.pc('''\n def f(x):\n f(x)\n def f(y):\n f(y)\n ''')\n with self.assertRaises(AssertionError):\n PlainFunctionFinder.run(code, stmt_only=False)\n \n def test_infogetter(self):\n code = self.pc('''\n def f(x):\n g(x)\n def g(y):\n print(h(y))\n def h(z):\n return w\n def m(k=1):\n pass\n f(a)\n ''')\n \n param_map, body_map, edges, order = FunctionInfoGetter.run(\n code, ['f', 'g', 'h'], require_nonrecursive=True)\n exp_param_map = {'f': ('x',),\n 'g': ('y',),\n 'h': ('z',)}\n exp_body_map = {'f': self.pc('g(x)'),\n 'g': self.pc('print(h(y))'),\n 'h': self.pc('return w')}\n exp_edges = {('f', 'g'), ('g', 'h')}\n exp_order = ['h', 'g', 'f']\n self.assertEqual(param_map, exp_param_map)\n self.assertEqual(body_map, exp_body_map)\n self.assertEqual(edges, exp_edges)\n self.assertEqual(order, exp_order)\n \n code = self.pc('''\n def f(x):\n g(x)\n def g(x):\n f(x)\n ''')\n with self.assertRaises(AssertionError):\n FunctionInfoGetter.run(code, ['f', 'g'],\n require_nonrecursive=True)\n \n def test_callinliner(self):\n code = self.pc('''\n f(a)\n print(f(a))\n ''')\n param_map = {'f': ('x',)}\n body_map = {'f': self.pc('foo(x, y)')}\n code = CallInliner.run(code, param_map, body_map)\n exp_code = self.pc('''\n foo(a, y)\n 
print(f(a))\n ''')\n self.assertEqual(code, exp_code)\n \n def test_inline(self):\n code = self.p('''\n def f(x):\n g(x)\n def g(y):\n print(h(y))\n def h(z):\n return w\n f(a)\n g(b)\n ''')\n code = inline_functions(code, {'f', 'g'})\n exp_code = self.p('''\n def h(z):\n return w\n print(h(a))\n print(h(b))\n ''')\n self.assertEqual(code, exp_code)\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.49598392844200134, "alphanum_fraction": 0.5160642862319946, "avg_line_length": 17.44444465637207, "blob_id": "0105c60c72b0478df71a8eb27bc9b5099a152890", "content_id": "a489fb15ebe9af4a3e175472b45f103376867031", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 498, "license_type": "no_license", "max_line_length": 37, "num_lines": 27, "path": "/incoq/tests/runtimelib/test_lru.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Unit tests for the lru module.\"\"\"\n\n\nimport unittest\n\nfrom incoq.runtime.lru import *\n\n\nclass TestLRU(unittest.TestCase):\n \n def test_lru(self):\n s = LRUTracker()\n s.add(1)\n s.add(2)\n s.add(3)\n self.assertEqual(s.peek(), 1)\n s.remove(1)\n self.assertEqual(s.peek(), 2)\n v = s.pop()\n self.assertEqual(v, 2)\n s.add(4)\n s.ping(3)\n self.assertEqual(s.peek(), 4)\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.4735148549079895, "alphanum_fraction": 0.4752475321292877, "avg_line_length": 32.66666793823242, "blob_id": "7dc727275d69421e1555b8d5333732497250d747", "content_id": "4a3e18ddfd735bb95666a1994fa51d922846ae5b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4040, "license_type": "no_license", "max_line_length": 77, "num_lines": 120, "path": "/incoq/compiler/tup/tuprel.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Tuple relations.\"\"\"\n\n\n__all__ = [\n 'is_trel',\n 'get_trel',\n 'make_trel',\n \n 'trel_bindmatch',\n 'check_bad_setmatches',\n]\n\n\nimport incoq.compiler.incast as L\n\n\ndef is_trel(rel):\n return rel.startswith('_TUP')\n\ndef get_trel(rel):\n assert rel.startswith('_TUP')\n arity = rel[len('_TUP'):]\n return int(arity)\n\ndef make_trel(arity):\n return '_TUP' + str(arity)\n\n\ndef trel_bindmatch(trel, mask, vars, body, *, typecheck):\n \"\"\"Make code to pattern match over a tuple relation.\"\"\"\n assert len(mask) >= 2\n tup, *elts = vars\n arity = get_trel(trel)\n assert arity == len(elts)\n bvars, uvars, _eqs = mask.split_vars(vars)\n \n if mask.parts[0] == 'b':\n # Kick it over to setmatch(). 
The set being matched is\n # a singleton containing a tuple whose first component\n # is the passed-in tuple, and whose remaining components\n # are those of the passed-in tuple, i.e.\n #\n # (tup, tup[0], ..., tup[n])\n #\n # This allows the mask to work correctly with arbitrary\n # equality components, wildcards, etc.\n singtup_elts = (L.ln(tup),)\n for i in range(arity):\n singtup_elts += (L.pe('TUP[IND]', subst={'TUP': L.ln(tup),\n 'IND': L.Num(i)}),)\n singtup = L.Tuple(singtup_elts, L.Load())\n \n code = L.pc('''\n for UVARS in setmatch({SINGTUP}, MASK, BVARS):\n BODY\n ''', subst={'UVARS': L.tuplify(uvars, lval=True),\n 'SINGTUP': singtup,\n 'MASK': mask.make_node(),\n 'BVARS': L.tuplify(bvars),\n '<c>BODY': body})\n \n # If any other part of the mask besides the first component\n # is bound, we need to confirm that it is consistent with\n # the tuple value of the first component.\n var_parts = list(enumerate(zip(vars, mask.parts)))\n conds = [L.pe('TUP[IND] == VAR', subst={'TUP': L.ln(tup),\n 'IND': L.Num(i),\n 'VAR': L.ln(var)})\n for i, (var, part) in var_parts\n if i > 0 if part == 'b']\n if len(conds) == 0:\n cond = None\n elif len(conds) == 1:\n cond = conds[0]\n else:\n cond = L.BoolOp(L.And(), conds)\n if cond is not None:\n code = L.pc('''\n if COND:\n CODE\n ''', subst={'COND': cond,\n '<c>CODE': code})\n \n if typecheck:\n code = L.pc('''\n if isinstance(TUP, tuple) and len(TUP) == ARITY:\n CODE\n ''', subst={'TUP': L.ln(tup),\n 'ARITY': L.Num(arity),\n '<c>CODE': code})\n \n # TODO: Case where first component is unbound but all other\n # components are bound, in which case we can avoid a map lookup.\n \n else:\n code = L.pc('''\n for UVARS in setmatch(SET, MASK, BVARS):\n BODY\n ''', subst={'UVARS': L.tuplify(uvars, lval=True),\n 'SET': L.ln(trel),\n 'MASK': mask.make_node(),\n 'BVARS': L.tuplify(bvars),\n '<c>BODY': body})\n \n return code\n\n\ndef check_bad_setmatches(tree):\n \"\"\"Raise an error if there are any setmatches over tuple relations,\n which are not meaningful in the final program since tuple relations\n are not materialized.\n \"\"\"\n class Vis(L.NodeVisitor):\n def visit_SetMatch(self, node):\n if (isinstance(node.target, L.Name) and\n is_trel(node.target.id)):\n raise AssertionError('Cannot generate general auxiliary map '\n 'over nested tuples without using '\n 'demand')\n Vis.run(tree)\n" }, { "alpha_fraction": 0.47194692492485046, "alphanum_fraction": 0.48545029759407043, "avg_line_length": 28.381669998168945, "blob_id": "a32c700cabb0687278129c37cfa5ee78b516782f", "content_id": "bcb2a882c998dcc61c56cd1cd458cd70437ceabe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 25327, "license_type": "no_license", "max_line_length": 79, "num_lines": 862, "path": "/experiments/jql/run_jql_exp.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Run the JQL experiment. 
See WPN08 Section 4.3.\n\nQueries:\n\n one-level:\n {a for a in attends if a.course == COMP101}\n\n two-level:\n {(a, s) for a in attends if a.course == COMP101\n for s in students if a.student == s}\n\n three-level:\n {(a, s, c) for a in attends if a.course == COMP101\n for s in students if a.student == s\n for c in courses if a.course == c}\n\"\"\"\n\n\nfrom random import random, randrange\nimport os\nimport sys\nfrom copy import deepcopy\nimport importlib\nfrom itertools import groupby\nfrom operator import itemgetter\n\nimport numpy as np\n\nfrom frexp import (ExpWorkflow, Datagen, Runner, Verifier,\n SimpleExtractor, MetricExtractor)\n\nfrom .java_bridge import get_config, spawn_java\n\nfrom experiments.util import SmallExtractor, LargeExtractor, canonize\n\n\nclass JQLDatagen(Datagen):\n \n \"\"\"Procedure as in WPN08. Create n students, courses, and attends.\n Use the ratio to decide whether to generate a query or a random\n add and remove.\n \n Params:\n N -- size\n nops -- number of operations\n ratio -- proportion of operations that are queries\n \n Note that WPN08 describes the ratio as a query-to-update ratio,\n rather than a query-to-all-ops ratio. This seems to be at odds with\n their graph data, which shows the cost of updates approaching zero\n as the ratio approaches 1, and which shows a behavioral change\n in \"ratio caching\" at x=0.2 when their stated threshold is 0.25.\n \n WPN08 is slightly unclear on whether an update operation\n deletes and re-adds the same Attends object, or removes\n an existing one and adds a brand new one. We interpret\n it as the latter.\n \n Each update, a random Attends object is chosen for removal.\n A new Attends object is constructed having a random Student\n and Course, and added in place of the old one. No attempt is\n made to detect or prevent redundant attend objects (having\n the same Course and Student). No explicit deallocation of\n the old Attends object is done.\n \"\"\"\n \n # All three queries are handled with the same core.\n \n def generate(self, P):\n N = P['N']\n nops = P['nops']\n ratio = P['ratio']\n \n # Description of initial Attends objects.\n INIT_ATT = [(randrange(N), randrange(N)) for _ in range(N)]\n \n OPS = []\n for _ in range(nops):\n if random() < ratio:\n OPS += [('query', None)]\n \n else:\n # Index of Attends object to remove.\n a = randrange(N)\n # Indices describing new Attends object to replace it with.\n s = randrange(N)\n c = randrange(N)\n OPS += [('update', (a, s, c))]\n \n return dict(\n dsparams = P,\n N = N,\n ratio = ratio,\n INIT_ATT = INIT_ATT,\n OPS = OPS,\n )\n \n level = None\n \n def get_tparams_list(self, dsparams):\n # It is important to run all tests of the same kind\n # together. I've noticed a cache-recency effect where\n # Java processes that start immediately after other Java\n # processes, with no intervening Python processes, get\n # an unfair speedup. 
\n \n return [\n dict(tid = dsp['dsid'],\n dsid = dsp['dsid'],\n prog = 'jql_' + self.level + progsuf)\n for progsuf in self.prog_suffixes\n for dsp in dsparams\n ]\n\n\nclass JQLDriver:\n \n check_interval = 100\n timeout = 60\n \n # Operation enum.\n Q = 1\n UP = 2\n \n def __init__(self, pipe_filename):\n import gc\n import pickle\n \n gc.disable()\n \n with open(pipe_filename, 'rb') as pf:\n dataset, prog, other_tparams = pickle.load(pf)\n os.remove(pipe_filename)\n \n \n self.prog = prog\n self.module = None\n self.N = dataset['N']\n self.init_att = dataset['INIT_ATT']\n self.ops = dataset['OPS']\n \n self.results = {}\n \n self.setUp()\n \n from frexp.util import StopWatch\n from time import process_time, perf_counter\n timer_cpu = StopWatch(process_time)\n timer_wall = StopWatch(perf_counter)\n \n # Make available to run for timeouts.\n self.timer_cpu = timer_cpu\n \n with timer_cpu, timer_wall:\n finished = self.run()\n \n if finished:\n import incoq.runtime\n self.results['size'] = incoq.runtime.get_total_structure_size(\n self.module.__dict__)\n self.results['time_cpu'] = timer_cpu.consume()\n self.results['time_wall'] = timer_wall.consume()\n \n self.results['stdmetric'] = self.results['time_cpu']\n else:\n self.results['timedout'] = True\n \n self.tearDown()\n \n \n with open(pipe_filename, 'wb') as pf:\n pickle.dump(self.results, pf)\n \n def setUp(self):\n # Import driven program.\n dirname, filename = os.path.split(self.prog)\n if dirname:\n sys.path.append(dirname)\n try:\n self.module = importlib.import_module(\n 'experiments.jql.' + filename)\n finally:\n if dirname:\n sys.path.pop()\n \n \n m = self.module\n N = self.N\n \n # Populate dataset.\n self.students = [m.make_student('s' + str(i))\n for i in range(N)]\n self.courses = [m.make_course('c' + str(i))\n for i in range(N)]\n self.course0 = self.courses[0]\n self.attends = [m.make_attends(self.students[s], self.courses[c])\n for s, c in self.init_att]\n \n m.do_query(self.course0)\n \n # Preprocess operations.\n for i, (op, data) in enumerate(self.ops):\n if op == 'query':\n self.ops[i] = (self.Q, data)\n elif op == 'update':\n ai, s, c = data\n data = (ai, self.students[s], self.courses[c])\n self.ops[i] = (self.UP, data)\n \n def run(self):\n course0 = self.course0\n Q = self.Q\n UP = self.UP\n attends = self.attends\n do_query = self.module.do_query_nodemand\n replace_attends = self.module.replace_attends\n \n check_interval = self.check_interval\n timer_cpu = self.timer_cpu\n timeout = self.timeout\n \n for i, (op, data) in enumerate(self.ops):\n # Check timeout every so often.\n if i % check_interval == 0:\n if timer_cpu.elapsed > timeout:\n return False\n \n if op is Q:\n do_query(course0)\n elif op is UP:\n i, s, c = data\n old_att = attends[i]\n new_att = replace_attends(old_att, s, c)\n attends[i] = new_att\n else:\n assert()\n \n return True\n \n def tearDown(self):\n pass\n\nclass JQLVerifyDriver:\n \n condense_output = True\n \n def log_output(self, output):\n canon_value = canonize(output, use_hash=self.condense_output)\n self.results['output'].append(canon_value)\n \n # Operation enum.\n Q = 1\n UP = 2\n \n def __init__(self, pipe_filename):\n import gc\n import pickle\n \n gc.disable()\n \n with open(pipe_filename, 'rb') as pf:\n dataset, prog, other_tparams = pickle.load(pf)\n os.remove(pipe_filename)\n \n \n self.prog = prog\n self.module = None\n self.N = dataset['N']\n self.init_att = dataset['INIT_ATT']\n self.ops = dataset['OPS']\n \n self.results = {}\n \n self.setUp()\n \n from frexp.util import 
StopWatch\n from time import process_time, perf_counter\n timer_cpu = StopWatch(process_time)\n timer_wall = StopWatch(perf_counter)\n \n self.results['output'] = []\n \n with timer_cpu, timer_wall:\n finished = self.run()\n \n self.tearDown()\n \n self.results['output'] = canonize(self.results['output'],\n use_hash=self.condense_output)\n \n \n with open(pipe_filename, 'wb') as pf:\n pickle.dump(self.results, pf)\n \n def setUp(self):\n # Import driven program.\n dirname, filename = os.path.split(self.prog)\n if dirname:\n sys.path.append(dirname)\n try:\n self.module = importlib.import_module(\n 'experiments.jql.' + filename)\n finally:\n if dirname:\n sys.path.pop()\n \n \n m = self.module\n N = self.N\n \n # Populate dataset.\n self.students = [m.make_student('s' + str(i))\n for i in range(N)]\n self.courses = [m.make_course('c' + str(i))\n for i in range(N)]\n self.course0 = self.courses[0]\n self.attends = [m.make_attends(self.students[s], self.courses[c])\n for s, c in self.init_att]\n \n m.do_query(self.course0)\n \n # Preprocess operations.\n for i, (op, data) in enumerate(self.ops):\n if op == 'query':\n self.ops[i] = (self.Q, data)\n elif op == 'update':\n ai, s, c = data\n data = (ai, self.students[s], self.courses[c])\n self.ops[i] = (self.UP, data)\n \n def run(self):\n course0 = self.course0\n Q = self.Q\n UP = self.UP\n attends = self.attends\n do_query = self.module.do_query_nodemand\n replace_attends = self.module.replace_attends\n \n for i, (op, data) in enumerate(self.ops):\n if op is Q:\n output = do_query(course0)\n self.log_output(output)\n elif op is UP:\n i, s, c = data\n old_att = attends[i]\n new_att = replace_attends(old_att, s, c)\n attends[i] = new_att\n else:\n assert()\n \n return True\n \n def tearDown(self):\n pass\n\nclass JQLRunner(Runner):\n \n # Special-cased for using spawn_java() instead of the\n # normal driver, when the program to run is one of the\n # Java versions.\n \n def dispatch_test(self, dataset, prog, other_tparams):\n if 'java' in prog:\n # jql_<level>_java_<cache>\n level = prog[4:5]\n cache = {'cache': True, 'nocache': False}[prog.split('_')[-1]]\n config = get_config()\n results = spawn_java(config, level, cache, False, dataset)\n return results\n else:\n return super().dispatch_test(dataset, prog, other_tparams)\n \n def run_all_tests(self, tparams_list):\n # Hack to skip trials for a prog after there's been a timeout.\n blacklist = set()\n \n datapoint_list = []\n for i, trial in enumerate(tparams_list, 1):\n prog = trial['prog']\n if prog in blacklist:\n self.print('Skipping test ' + str(i))\n continue\n \n itemstr = 'Running test {} of {} ...'.format(i, len(tparams_list))\n self.print(itemstr, end='')\n \n datapoints, timedout = self.repeat_single_test(trial, len(itemstr))\n if timedout:\n blacklist.add(prog)\n datapoint_list.extend(datapoints)\n \n return datapoint_list\n\nclass JQLVerifier(Verifier):\n \n condense_output = JQLVerifyDriver.condense_output\n \n def dispatch_test(self, dataset, prog, other_tparams):\n # Use verify=True for spawn_java().\n # Canonize result.\n \n if 'java' in prog:\n # jql_<level>_java_<cache>\n level = prog[4:5]\n cache = {'cache': True, 'nocache': False}[prog.split('_')[-1]]\n config = get_config()\n results = spawn_java(config, level, cache, True, dataset)\n results['output'] = canonize(results['output'],\n use_hash=self.condense_output)\n return results\n else:\n return super().dispatch_test(dataset, prog, other_tparams)\n\n\nclass JQLExtractor(SimpleExtractor, SmallExtractor):\n \n orig_format = 
'poly1'\n jqlcache_format = 'normal'\n jqlnocache_format = 'normal'\n \n @property\n def series(self):\n # Post-process to copy 3 times for the 3 levels.\n template_list = [\n ('jql_{}_java_nocache', 'JQL no caching',\n 'purple', '-- s ' + self.jqlnocache_format),\n ('jql_{}_java_cache', 'JQL always caching',\n 'teal', '-- o ' + self.jqlcache_format),\n ('jql_{}_orig', 'original',\n 'red', '- s ' + self.orig_format),\n ('jql_{}_inc', 'incremental',\n 'blue', '- o poly1'),\n ('jql_{}_dem', 'filtered',\n 'green', '- ^ poly1'),\n ]\n \n return [(sid.format(level), name.format(level), color, style)\n for level in ['1', '2', '3']\n for sid, name, color, style in template_list]\n \n def get_series_points(self, datapoints, sid, *,\n average):\n # Hook into this so we can dump std convergence info.mail.g\n points = super().get_series_points(datapoints, sid, average=False)\n \n fmtstring = ('{}: x = {:.3f}, stddev = {:.3f}, '\n 'mean = {:.3f}, rsd = {:.3f}')\n \n if len(points) > 0:\n points = [(x, y) for x, y, _, _ in points]\n points.sort(key=itemgetter(0))\n groups = groupby(points, key=itemgetter(0))\n \n worst = 0\n for x, grouppoints in groups:\n ys = [y for (_, y) in grouppoints]\n m = np.mean(ys)\n s = np.std(ys)\n print(fmtstring.format(sid, x, s, m, s / m))\n worst = max(worst, s / m)\n print('-' * 16)\n print('{}: worst = {:.3f}'.format(sid, worst))\n print()\n \n return super().get_series_points(datapoints, sid, average=average)\n\n\nclass JQLWorkflow(ExpWorkflow):\n \n class ExpDatagen(JQLDatagen):\n \n prog_suffixes = [\n # Orig takes forever to run because it's a Cartesian product\n # followed by join conditions. It can even take a while to\n # timeout because the timeout test is synchronous with\n # completing a certain number of operations.\n '_orig',\n '_inc',\n '_dem',\n '_java_nocache',\n '_java_cache',\n ]\n \n class ExpExtractor(JQLExtractor):\n pass\n # Comment out below for paper submission figure that\n # omits the title.\n @property\n def title(self):\n return 'Query ' + str(self.level)\n \n ExpRunner = JQLRunner\n ExpVerifier = JQLVerifier\n ExpDriver = JQLDriver\n ExpVerifyDriver = JQLVerifyDriver\n \n require_ac = False ###\n\n\nclass Ratio(JQLWorkflow):\n \n \"\"\"Vary the query ratio.\"\"\"\n \n class ExpDatagen(JQLWorkflow.ExpDatagen):\n \n N = 1000\n nops = 5000\n divs = 20\n \n def get_dsparams_list(self):\n return [\n dict(\n dsid = str(ratio),\n x = ratio,\n \n N = self.N,\n nops = self.nops,\n ratio = ratio,\n )\n for ratio in [.01] + [i/self.divs for i in\n range(0, self.divs + 1)]\n ]\n \n stddev_window = .1\n min_repeats = 50\n max_repeats = 50\n \n class ExpExtractor(JQLWorkflow.ExpExtractor, MetricExtractor):\n \n ylabel = 'Running time (in seconds)'\n xlabel = 'Fraction of operations that are queries'\n \n metric = 'time_cpu'\n \n xmin = -.05\n xmax = 1.05\n\nclass Ratio1(Ratio):\n \n prefix = 'results/jql_ratio_1'\n \n class ExpDatagen(Ratio.ExpDatagen):\n level = '1'\n \n class ExpExtractor(Ratio.ExpExtractor):\n level = '1'\n ymax = .5\n\nclass Ratio2(Ratio):\n \n prefix = 'results/jql_ratio_2'\n \n class ExpDatagen(Ratio.ExpDatagen):\n level = '2'\n \n class ExpExtractor(Ratio.ExpExtractor):\n \n level = '2'\n \n @property\n def series(self):\n s = super().series\n new_s = []\n for sid, name, color, style in s:\n if sid in ['jql_2_orig']:\n name += ' / 20'\n new_s.append((sid, name, color, style))\n return new_s\n \n def project_y(self, p):\n y = super().project_y(p)\n if p['prog'] in ['jql_2_orig']:\n return y / 2e1\n else:\n return y\n \n ymax = 2\n 
legend_loc = 'upper center'\n\nclass Ratio3(Ratio):\n prefix = 'results/jql_ratio_3'\n \n class ExpDatagen(Ratio.ExpDatagen):\n level = '3'\n # Don't run orig, takes forever even to timeout.\n prog_suffixes = [\n '_inc',\n '_dem',\n '_java_nocache',\n '_java_cache',\n ]\n \n class ExpExtractor(Ratio.ExpExtractor):\n level = '3'\n ymax = 2\n legend_loc = 'upper center'\n\n\nclass Scale(JQLWorkflow):\n \n \"\"\"Scale up the number of elements.\"\"\"\n \n class ExpDatagen(JQLWorkflow.ExpDatagen):\n \n ratio = .5\n points = list(range(1000, 30000 + 1, 1000))\n nops = 5000\n \n def get_dsparams_list(self):\n return [\n dict(\n dsid = str(x),\n x = x,\n \n N = x,\n nops = self.nops,\n ratio = self.ratio,\n )\n for x in self.points\n ]\n \n stddev_window = .1\n min_repeats = 50\n max_repeats = 50\n \n class ExpExtractor(JQLWorkflow.ExpExtractor, MetricExtractor):\n \n ylabel = 'Running time (in seconds)'\n xlabel = 'Size of source collections (in thousands)'\n \n metric = 'time_cpu'\n \n def project_x(self, p):\n return super().project_x(p) / 1e3\n \n ymin = 0\n xmin = 1\n# xmax = 21\n xmax = 31\n# x_ticklocs = [0, 4, 8, 12, 16, 20]\n x_ticklocs = [0, 5, 10, 15, 20, 25, 30]\n\nclass Scale1(Scale):\n \n prefix = 'results/jql_scale_1'\n \n class ExpDatagen(Scale.ExpDatagen):\n level = '1'\n \n class ExpExtractor(Scale.ExpExtractor):\n \n level = '1'\n orig_format = 'poly1'\n jqlcache_format = 'poly1'\n jqlnocache_format = 'poly1'\n# orig_format = 'points'\n# jqlcache_format = 'points'\n# jqlnocache_format = 'points'\n# generate_csv = False\n \n multipliers = {\n# 'jql_1_orig': .5,\n 'jql_1_inc': 50,\n 'jql_1_dem': 50,\n# 'jql_1_java_nocache': 1e-2,\n 'jql_1_java_cache': 50,\n }\n \n @property\n def series(self):\n s = super().series\n new_s = []\n for sid, name, color, style in s:\n mult = self.multipliers.get(sid, None)\n if mult is not None:\n op = ' $\\\\times$ ' if mult >= 1 else ' / '\n if mult < 1:\n mult = 1 / mult\n if round(mult, 3) == round(mult):\n mult = round(mult)\n else:\n mult = round(mult, 3)\n name += op + str(mult)\n new_s.append((sid, name, color, style))\n return new_s\n \n def project_y(self, p):\n y = super().project_y(p)\n if p['prog'] in self.multipliers:\n return y * self.multipliers[p['prog']]\n else:\n return y\n\nclass Scale2(Scale):\n \n prefix = 'results/jql_scale_2'\n \n class ExpDatagen(Scale.ExpDatagen):\n level = '2'\n \n # Don't run orig, which takes forever even before\n # reaching the 100 step timeout checkpoint.\n prog_suffixes = [\n '_inc',\n '_dem',\n '_java_nocache',\n '_java_cache',\n ]\n \n class ExpExtractor(Scale.ExpExtractor):\n \n level = '2'\n jqlcache_format = 'poly1'\n jqlnocache_format = 'poly1'\n# jqlcache_format = 'points'\n# jqlnocache_format = 'points'\n# generate_csv = False\n \n multipliers = {\n 'jql_2_inc': 50,\n 'jql_2_dem': 50,\n }\n \n @property\n def series(self):\n s = super().series\n new_s = []\n for sid, name, color, style in s:\n mult = self.multipliers.get(sid, None)\n if mult is not None:\n op = ' $\\\\times$ ' if mult >= 1 else ' / '\n if mult < 1:\n mult = 1 / mult\n if round(mult, 3) == round(mult):\n mult = round(mult)\n else:\n mult = round(mult, 3)\n name += op + str(mult)\n new_s.append((sid, name, color, style))\n return new_s\n \n def project_y(self, p):\n y = super().project_y(p)\n if p['prog'] in self.multipliers:\n return y * self.multipliers[p['prog']]\n else:\n return y\n \n max_yitvl = 4\n\nclass Scale2Bigger(Scale2):\n \n prefix = 'results/jql_scale_2_bigger'\n \n class ExpDatagen(Scale2.ExpDatagen):\n 
prog_suffixes = [\n '_inc',\n '_dem',\n '_java_nocache',\n '_java_cache',\n ]\n \n points = [1000] + list(range(10000, 100000 + 1, 10000))\n \n stddev_window = .1\n min_repeats = 10\n max_repeats = 10\n \n class ExpExtractor(Scale2.ExpExtractor):\n xmin = 0\n xmax = 105\n x_ticklocs = [0, 20, 40, 60, 80, 100]\n\nclass Scale3(Scale):\n \n \"\"\"JQL with caching experiences apparent linear growth from\n x = 5k to about 45k, but then a sudden discontinuity at 50k\n where it jumps four-fold. JQL without caching jumps even\n higher.\n \"\"\"\n \n prefix = 'results/jql_scale_3'\n \n class ExpDatagen(Scale.ExpDatagen):\n level = '3'\n \n # Don't run orig, which takes forever even before\n # reaching the 100 step timeout checkpoint.\n prog_suffixes = [\n '_inc',\n '_dem',\n '_java_nocache',\n '_java_cache',\n ]\n \n class ExpExtractor(Scale.ExpExtractor):\n \n level = '3'\n jqlcache_format = 'poly1'\n jqlnocache_format = 'poly1'\n# jqlcache_format = 'points'\n# jqlnocache_format = 'points'\n# generate_csv = False\n \n multipliers = {\n 'jql_3_inc': 50,\n 'jql_3_dem': 50,\n# 'jql_3_java_nocache': 1e-2,\n# 'jql_3_java_cache': 1e-2,\n }\n \n @property\n def series(self):\n s = super().series\n new_s = []\n for sid, name, color, style in s:\n mult = self.multipliers.get(sid, None)\n if mult is not None:\n op = ' $\\\\times$ ' if mult >= 1 else ' / '\n if mult < 1:\n mult = 1 / mult\n if round(mult, 3) == round(mult):\n mult = round(mult)\n else:\n mult = round(mult, 3)\n name += op + str(mult)\n new_s.append((sid, name, color, style))\n return new_s\n \n def project_y(self, p):\n y = super().project_y(p)\n if p['prog'] in self.multipliers:\n return y * self.multipliers[p['prog']]\n else:\n return y\n \n stddev_window = .1\n min_repeats = 15\n max_repeats = 15\n\nclass Scale3Bigger(Scale3):\n \n prefix = 'results/jql_scale_3_bigger'\n \n class ExpDatagen(Scale3.ExpDatagen):\n prog_suffixes = [\n '_inc',\n '_dem',\n '_java_nocache',\n '_java_cache',\n ]\n \n points = [1000] + list(range(5000, 30000 + 1, 5000))\n \n stddev_window = .1\n min_repeats = 5\n max_repeats = 5\n \n class ExpExtractor(Scale3.ExpExtractor):\n xmin = 0\n xmax = 31\n x_ticklocs = [0, 5, 10, 15, 20, 25, 30]\n" }, { "alpha_fraction": 0.49668875336647034, "alphanum_fraction": 0.5662251710891724, "avg_line_length": 15.777777671813965, "blob_id": "e2e91486237b1c9c626800838d4acf95cc9570a0", "content_id": "843ffca567fc16b49f605d25aa98703d6eabe8d4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 302, "license_type": "no_license", "max_line_length": 54, "num_lines": 18, "path": "/incoq/tests/programs/auxmap/wildcard_in.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "# Auxmaps with equality constraints.\n\nfrom incoq.runtime import *\n\nP = Set()\n\nfor v in [(1, 1, 2), (1, 2, 2), (3, 4, 2), (5, 6, 7)]:\n P.add(v)\n\nprint(sorted(setmatch(P, 'uwb', 2)))\n\nP.remove((1, 1, 2))\n\nprint(sorted(setmatch(P, 'uwb', 2)))\n\nP.remove((1, 2, 2))\n\nprint(sorted(setmatch(P, 'uwb', 2)))\n" }, { "alpha_fraction": 0.5438311696052551, "alphanum_fraction": 0.5925324559211731, "avg_line_length": 23.68000030517578, "blob_id": "46424bb705500e6db78c910c0fd88d39c11ec54b", "content_id": "7f5b4e2f74b7073eeb7adc2b0021a466392bb73a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 616, "license_type": "no_license", "max_line_length": 56, "num_lines": 25, "path": "/incoq/tests/programs/objcomp/autoflatten_out.py", "repo_name": 
"InvInc/invinc", "src_encoding": "UTF-8", "text": "from incoq.runtime import *\n# Comp1 := {x : x in R}\nComp1 = RCSet()\ndef _maint_Comp1_R_add(_e):\n # Iterate {v1_x : v1_x in deltamatch(R, 'b', _e, 1)}\n v1_x = _e\n Comp1.add(v1_x)\n\ndef _maint_Comp1_R_remove(_e):\n # Iterate {v2_x : v2_x in deltamatch(R, 'b', _e, 1)}\n v2_x = _e\n Comp1.remove(v2_x)\n\nfor i in range(1, 5):\n # Begin maint Comp1 after \"R.add(i)\"\n _maint_Comp1_R_add(i)\n # End maint Comp1 after \"R.add(i)\"\nprint(sorted(Comp1))\n# Begin maint Comp1 before \"R.remove(3)\"\n_maint_Comp1_R_remove(3)\n# End maint Comp1 before \"R.remove(3)\"\nprint(sorted(Comp1))\nS = Set()\no = Obj()\no.a = 1" }, { "alpha_fraction": 0.5921950936317444, "alphanum_fraction": 0.5934494733810425, "avg_line_length": 34.51980209350586, "blob_id": "d074f6d7200b839972b42edbcb138197ebd8d928", "content_id": "4eb1c4f367e362c949fdd47b6b5ffb86c271e4ed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 21525, "license_type": "no_license", "max_line_length": 79, "num_lines": 606, "path": "/incoq/compiler/central/transform.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "###############################################################################\n# transform.py #\n# Author: Jon Brandvein #\n###############################################################################\n\n\"\"\"Transformation procedure.\"\"\"\n\n\n__all__ = [\n 'preprocess_tree',\n 'transform_ast',\n 'transform_source',\n 'transform_file',\n]\n\n\nimport time\n\nfrom incoq.util.linecount import get_loc_source\nfrom incoq.util.str import quote_items\nimport incoq.compiler.incast as L\nfrom incoq.compiler.set import inc_all_relmatch\nfrom incoq.compiler.comp import (\n patternize_comp, depatternize_all, inc_relcomp, \n impl_auxonly_relcomp, comp_inc_needs_dem,\n comp_isvalid)\nfrom incoq.compiler.aggr import (\n inc_aggr, flatten_smlookups, aggr_needs_batch, aggr_needs_dem,\n aggr_canuse_halfdemand)\nfrom incoq.compiler.obj import to_pairdomain, to_objdomain\nfrom incoq.compiler.demand import deminc_relcomp\nfrom incoq.compiler.tup import (\n flatten_tuples, check_bad_setmatches, flatten_relations)\nfrom incoq.compiler.cost import analyze_costs, eval_coststr\n\nfrom .manager import get_clause_factory, make_manager\nfrom .rewritings import (import_distalgo, get_distalgo_message_sets,\n MacroUpdateRewriter,\n SetTypeRewriter, ObjTypeRewriter, MapOpImporter,\n StrictUpdateRewriter,\n UpdateRewriter, MinMaxRewriter,\n eliminate_deadcode, PassEliminator,\n RelationFinder)\n\n\nclass FunctionUniqueChecker(L.NodeVisitor):\n \n \"\"\"Raise AssertionError if the same function name is defined more\n than once at the top level.\n \"\"\"\n \n def process(self, tree):\n self.names = set()\n super().process(tree)\n \n def visit_FunctionDef(self, node):\n assert node.name not in self.names, \\\n 'Function {} defined multiple times'.format(node.name)\n self.names.add(node.name)\n\nclass OrigSetFinder(L.NodeVisitor):\n \n \"\"\"Find all relation updates in the tree.\"\"\"\n \n def process(self, tree):\n self.rels = set()\n super().process(tree)\n return self.rels\n \n def visit_SetUpdate(self, node):\n if isinstance(node.target, L.Name):\n self.rels.add(L.get_name(node.target))\n\nclass InputQueryMarker(L.QueryMapper):\n \"\"\"Mark queries with an option to indicate that they are\n from (or correspond to a query in) the input program, as opposed\n to something introduced as a by-product of our transformation.\n \"\"\"\n \n 
def map_Comp(self, node):\n new_options = dict(node.options)\n new_options['in_original'] = True\n return node._replace(options=new_options)\n \n def map_Aggregate(self, node):\n new_options = dict(node.options)\n new_options['in_original'] = True\n return node._replace(options=new_options)\n\ndef is_original(query):\n return query.options.get('in_original', False)\n\nclass OriginalUpdateCounter(L.NodeVisitor):\n \n \"\"\"Count the number of outermost Maintenance nodes which are\n for one of the specified invariants and specified relations.\n \"\"\"\n \n def __init__(self, rels, invs):\n super().__init__()\n self.rels = rels\n self.invs = invs\n \n def process(self, tree):\n self.count = 0\n super().process(tree)\n return self.count\n \n def visit_Maintenance(self, node):\n # Figure out whether this is an update to one of the\n # given sets by scanning the description string. Hackish.\n update_node = L.ps(node.desc)\n target = update_node.target\n is_relevant = (isinstance(target, L.Name) and\n L.get_name(target) in self.rels)\n \n if node.name in self.invs and is_relevant:\n self.count += 1\n # Don't recurse. We don't want to double-count the update,\n # and there shouldn't be any original updates in the\n # inserted precode/postcode.\n else:\n self.generic_visit(node)\n\n\nclass QueryFinder(L.NodeVisitor):\n \n \"\"\"Find the next query to be transformed and return a pair of\n it along with helper information. Return (None, None) if there\n is no query to be transformed.\n \n To be eligible, the query must have an impl besides 'batch' and\n not have the 'notransform' or '_invalid' options. Innermost queries\n are handled first.\n \"\"\"\n \n # Helper info format is a dictionary with entries:\n # 'impl': impl to use (before considering fallbacks)\n # 'in_inccomp': whether this query appears inside a\n # comprehension we expect to incrementalize\n # 'half_demand': (Aggregates only) whether to prefer the half-\n # demand strategy over normal demand\n \n class Found(BaseException):\n def __init__(self, node, info):\n self.node = node\n self.info = info\n \n def __init__(self, opman):\n super().__init__()\n self.opman = opman\n \n def get_query_impl(self, query):\n \"\"\"Given a query, return its impl option if it exists,\n or the global default otherwise. 
This does not take into\n account optional fallbacks.\n \"\"\"\n impl = self.opman.get_queryopt(query, 'impl')\n if impl is None:\n impl = self.opman.get_opt('default_impl')\n assert impl in ['batch', 'auxonly', 'inc', 'dem']\n return impl\n \n def process(self, tree):\n # Track the number of comprehensions we're currently inside\n # of that have an impl of 'inc' or 'dem' (i.e., a depth).\n # Used to decide whether an aggregate can fallback to 'inc'.\n self.inccomp_depth = 0\n \n # Traverse, but abort as soon as a query is found.\n try:\n super().process(tree)\n except self.Found as f:\n return f.node, f.info\n else:\n return None, None\n \n def visit_Comp(self, node):\n impl = self.get_query_impl(node)\n inccomp = impl in ['inc', 'dem']\n \n if inccomp:\n self.inccomp_depth += 1\n # (Can raise exception.)\n self.generic_visit(node)\n if inccomp:\n self.inccomp_depth -= 1\n \n # We only get here if there is no inner query to transform.\n if (impl != 'batch' and\n not self.opman.get_queryopt(node, 'notransform') and\n not node.options.get('_invalid', False)):\n info = {'impl': impl,\n 'in_inccomp': self.inccomp_depth > 0}\n raise self.Found(node, info)\n \n def visit_Aggregate(self, node):\n impl = self.get_query_impl(node)\n half_demand = self.opman.get_queryopt(node, 'aggr_halfdemand')\n if half_demand is None:\n half_demand = self.opman.get_opt('default_aggr_halfdemand')\n \n # (Can raise exception.)\n self.generic_visit(node)\n \n if (impl in ['inc', 'dem'] and\n not node.options.get('_invalid', False)):\n info = {'impl': impl,\n 'in_inccomp': self.inccomp_depth > 0,\n 'half_demand': half_demand}\n raise self.Found(node, info)\n\n\ndef transform_query(tree, manager, query, info):\n \"\"\"Transform a single query. info is the dictionary returned\n by QueryFinder.\n \"\"\"\n opman = manager.options\n comp_dem_fallback = opman.get_opt('comp_dem_fallback')\n aggr_batch_fallback = opman.get_opt('aggr_batch_fallback')\n aggr_dem_fallback = opman.get_opt('aggr_dem_fallback')\n impl = info['impl']\n in_inccomp = info['in_inccomp']\n \n if isinstance(query, L.Comp):\n # If we can't handle this query, flag it and skip it.\n if is_original(query) and not comp_isvalid(manager, query):\n new_options = dict(query.options)\n new_options['_invalid'] = True\n rewritten_query = query._replace(options=new_options)\n tree = L.QueryReplacer.run(tree, query, rewritten_query)\n \n manager.stats['queries skipped'] += 1\n if manager.options.get_opt('verbose'):\n print('Skipping query ' + L.ts(query))\n return tree\n \n # Flatten lookups (e.g. into aggregate result maps) first,\n # then rewrite patterns. 
(Opposite order fails to rewrite\n # all occurrences of vars in the condition, since our\n # renamer doesn't catch some cases like demparams of DEMQUERY\n # nodes.)\n rewritten_query = flatten_smlookups(query)\n tree = L.QueryReplacer.run(tree, query, rewritten_query)\n query = rewritten_query\n \n if not opman.get_opt('pattern_in'):\n rewritten_query = patternize_comp(query, manager.factory)\n tree = L.QueryReplacer.run(tree, query, rewritten_query)\n query = rewritten_query\n \n # See if fallback applies.\n if (impl == 'inc' and comp_inc_needs_dem(manager, query) and\n comp_dem_fallback):\n impl = 'dem'\n \n \n name = next(manager.compnamegen)\n \n if impl == 'auxonly':\n tree = impl_auxonly_relcomp(tree, manager, query, name)\n manager.stats['comps expanded'] += 1\n elif impl == 'inc':\n tree = inc_relcomp(tree, manager, query, name)\n elif impl == 'dem':\n tree = deminc_relcomp(tree, manager, query, name)\n else:\n assert()\n \n if impl in ['inc', 'dem']:\n if is_original(query):\n manager.stats['orig queries'] += 1\n manager.stats['incr queries'] += 1\n manager.stats['incr comps'] += 1\n \n elif isinstance(query, L.Aggregate):\n # 'auxonly' doesn't apply to aggregates, but may appear here if\n # it is selected as the default_impl. In any case, treat it as\n # 'batch'.\n if impl == 'auxonly':\n impl = 'batch'\n \n # See if fallbacks apply.\n \n if (impl in ['inc', 'dem'] and aggr_needs_batch(query) and\n aggr_batch_fallback):\n new_options = dict(query.options)\n new_options['_invalid'] = True\n rewritten_query = query._replace(options=new_options)\n tree = L.QueryReplacer.run(tree, query, rewritten_query)\n \n manager.stats['queries skipped'] += 1\n print('Skipping query ' + L.ts(query))\n return tree\n \n if (impl == 'inc' and (in_inccomp or aggr_needs_dem(query)) and\n aggr_dem_fallback):\n impl = 'dem'\n \n if impl in ['inc', 'dem']:\n name = next(manager.aggrnamegen)\n half_demand = (info['half_demand'] and\n aggr_canuse_halfdemand(query))\n \n tree = inc_aggr(tree, manager, query, name,\n demand=(impl=='dem'),\n half_demand=half_demand)\n if is_original(query):\n manager.stats['orig queries'] += 1\n manager.stats['incr queries'] += 1\n manager.stats['incr aggrs'] += 1\n else:\n assert()\n \n \n # Helpful for those long-running transformations.\n manager.stats['queries processed'] += 1\n processed = manager.stats['queries processed']\n if processed % 100 == 0:\n print('---- Transformed {} queries so far ----'.format(processed))\n \n return tree\n\ndef transform_all_queries(tree, manager):\n \"\"\"Process all queries, innermost first.\"\"\"\n query, info = QueryFinder.run(tree, manager.options)\n while query is not None:\n tree = transform_query(tree, manager, query, info)\n query, info = QueryFinder.run(tree, manager.options)\n \n # Mark any invalid comprehensions that weren't already found,\n # so we don't try to do any further relational operations on\n # them.\n class Marker(L.QueryMapper):\n def map_Comp(self, node):\n if not comp_isvalid(manager, node):\n new_options = dict(node.options)\n new_options['_invalid'] = True\n return node._replace(options=new_options)\n \n return Marker.run(tree)\n\n\ndef preprocess_tree(manager, tree, opts):\n \n opman = manager.options\n \n tree = import_distalgo(tree)\n tree = L.import_incast(tree)\n \n # Remove the runtimelib declaration.\n # It will be added back at the end.\n tree = L.remove_runtimelib(tree)\n \n # Complain if there are redundant function definitions.\n FunctionUniqueChecker.run(tree)\n \n # Grab all options.\n tree, opts = 
L.parse_options(tree, ext_opts=opts)\n nopts, qopts = opts\n opman.import_opts(nopts, qopts)\n \n # Fill in missing param/option info.\n tree, unused = L.attach_qopts_info(tree, opts)\n tree = L.infer_params(tree, obj_domain=opman.get_opt('obj_domain'))\n \n # Error if unused comps in options (prevent typos from causing\n # much frustration).\n if len(unused) > 0:\n raise L.ProgramError('Options given for non-existent queries: ' +\n quote_items(L.ts(c) for c in unused))\n \n return tree, opman\n\n\ndef elim_inputrel_params(tree, input_rels):\n \"\"\"For sets that are input relations, remove these sets from\n the parameter lists of queries.\n \"\"\"\n # XXX: Do this for aggregates as well?\n class Trans(L.QueryMapper):\n def map_Comp(self, node):\n params = tuple(p for p in node.params if p not in input_rels)\n if params != node.params:\n return node._replace(params=params)\n \n return Trans.run(tree)\n\n\ndef transform_ast(tree, *, nopts=None, qopts=None):\n \"\"\"Take a PyAST and return a transformed output PyAST.\n \n nopts and qopts, if not None, specify additional normal and query\n options respectively. They have dictionary format and override the\n input tree's own option specifications.\n \"\"\"\n t1 = time.process_time()\n \n if nopts is None:\n nopts = {}\n if qopts is None:\n qopts = {}\n nopts = nopts.copy()\n qopts = qopts.copy()\n \n manager = make_manager()\n \n tree, opman = preprocess_tree(manager, tree, (nopts, qopts))\n \n verbose = opman.get_opt('verbose')\n objdomain = opman.get_opt('obj_domain')\n objdomain_out = objdomain and opman.get_opt('obj_domain_out')\n typecheck = opman.get_opt('maint_emit_typechecks')\n \n manager.factory = get_clause_factory(use_objdomain=objdomain_out,\n use_typecheck=typecheck)\n \n # Rewrite set/obj types.\n tree = SetTypeRewriter.run(tree, manager.namegen,\n set_literals=True, orig_set_comps=False)\n tree = ObjTypeRewriter.run(tree)\n \n # Import map key assignment/deletion nodes.\n tree = MapOpImporter.run(tree)\n \n # Rewrite non-trivial update operands.\n tree = UpdateRewriter.run(tree, manager.namegen)\n \n # Rewrite for strictness if requested.\n ns_sets = opman.get_opt('nonstrict_sets')\n ns_fields = opman.get_opt('nonstrict_fields')\n ns_maps = opman.get_opt('nonstrict_maps')\n tree = StrictUpdateRewriter.run(\n tree, rewrite_sets=ns_sets, rewrite_fields = ns_fields,\n rewrite_maps = ns_maps)\n \n # Rewrite macro updates.\n tree = MacroUpdateRewriter.run(tree)\n \n input_rels = list(opman.get_opt('input_rels'))\n # Find additional input relations.\n if opman.get_opt('autodetect_input_rels'):\n detected_rels = RelationFinder.run(tree)\n input_rels.extend(r for r in detected_rels\n if r not in input_rels)\n \n # Get type annotations and cost annotations/\n typeann = opman.get_opt('var_types')\n vartypes = {k: L.parse_typestr(v) for k, v in typeann.items()}\n manager.vartypes = vartypes\n \n flatten_rels = opman.get_opt('flatten_rels')\n \n # DistAlgo message sets may be considered as relations\n # and get flattened.\n if opman.get_opt('flatten_distalgo_messages'):\n for s in get_distalgo_message_sets(tree):\n if s not in flatten_rels:\n flatten_rels.append(s)\n if s not in input_rels:\n input_rels.append(s)\n \n # Do the flattening.\n if len(flatten_rels) > 0:\n if verbose:\n print('Flattening relations: ' + ', '.join(flatten_rels))\n # This will also update the manager vartypes.\n tree = flatten_relations(tree, flatten_rels, manager)\n \n tree = elim_inputrel_params(tree, input_rels)\n \n tree = manager.analyze_types(tree)\n \n # Go 
to the pair domain.\n if objdomain:\n tree = to_pairdomain(tree, manager, input_rels)\n \n # In principle we may need to do another UpdateRewriter run\n # to rewrite F_f.remove(o, o.f) so o.f is saved in a temp\n # variable. In practice I haven't seen this cause any\n # problems yet.\n \n # Rewrite min/max of set unions.\n # Note: Since this happens after pair-domain transformation,\n # we may end up not turning some aggregate arguments into\n # comps.\n tree = MinMaxRewriter.run(tree)\n \n # Flatten nested tuples in queries.\n tree = flatten_tuples(tree)\n \n # Mark all the queries that exist right now as being from\n # the input program, so we can track statistics for input\n # queries versus intermediate queries that we create.\n tree = InputQueryMarker.run(tree)\n original_sets = OrigSetFinder.run(tree)\n \n # Incrementalize queries.\n tree = transform_all_queries(tree, manager)\n \n if not opman.get_opt('pattern_out'):\n tree = depatternize_all(tree, manager.factory)\n \n tree = SetTypeRewriter.run(tree, manager.namegen,\n set_literals=False, orig_set_comps=True)\n \n tree = manager.analyze_types(tree)\n \n if opman.get_opt('analyze_costs'):\n print('Analyzing costs')\n rewrite_types = opman.get_opt('rewrite_costsastypes')\n tree, costs = analyze_costs(manager, tree,\n rewrite_types=rewrite_types,\n warn=True)\n manager.stats['costs'] = costs\n \n # For debugging type information.\n# print(L.ts_typed(tree))\n \n # Incrementalize setmatch queries.\n check_bad_setmatches(tree)\n tree = inc_all_relmatch(tree, manager)\n \n # Count updates to original queries.\n updatecount = OriginalUpdateCounter.run(\n tree, original_sets, manager.original_queryinvs)\n manager.stats['orig updates'] = updatecount\n \n # Eliminate deadcode.\n # Must happen before we return to obj domain, where there\n # could be aliasing.\n if opman.get_opt('deadcode_elim'):\n tree = eliminate_deadcode(\n tree,\n keepvars=opman.get_opt('deadcode_keepvars'),\n obj_domain_out=opman.get_opt('obj_domain_out'),\n verbose=verbose)\n \n # Go back to the object domain.\n if opman.get_opt('obj_domain') and opman.get_opt('obj_domain_out'):\n tree = to_objdomain(tree, manager)\n \n if opman.get_opt('mode') == 'outline':\n tree = L.maint_skeleton(tree)\n \n # Inline maintenance code if requested.\n # Otherwise, just eliminate unused maintenance functions.\n maintfunc_pred = lambda n: n.startswith('_maint_')\n if opman.get_opt('maint_inline'):\n if verbose:\n print('Inlining maintenance functions')\n funcnames = list(L.FuncDefLister.run(tree, maintfunc_pred).keys())\n tree = L.inline_functions(tree, funcnames)\n else:\n if verbose:\n print('Eliminating dead functions')\n tree = L.elim_deadfuncs(tree, maintfunc_pred)\n \n # Expand maintenance nodes away.\n tree = L.MaintExpander.run(tree)\n \n # Eliminate redundant Pass statements. 
Occurs after eliminating\n # Maint nodes, since that flattens multiple bodies of code\n # together and makes it possible to eliminate more Passes.\n if opman.get_opt('deadcode_elim'):\n tree = PassEliminator.run(tree)\n \n # Add header comments.\n tree = tree._replace(body=tuple(manager.header_comments) + tree.body)\n \n # Convert back to Python AST format.\n tree = L.add_runtimelib(tree)\n tree = L.export_program(tree)\n \n t2 = time.process_time()\n manager.stats['trans time'] = t2 - t1\n \n if verbose:\n print()\n \n return tree, manager\n\ndef transform_source(source, *, nopts=None, qopts=None):\n \"\"\"Like transform_ast, but from source code to source code.\"\"\"\n tree = L.p(source)\n \n tree, manager = transform_ast(tree, nopts=nopts, qopts=qopts)\n \n result = L.ts(tree)\n manager.stats['lines'] = get_loc_source(result)\n return result, manager\n\ndef transform_file(in_filename, out_filename, *, nopts=None, qopts=None):\n \"\"\"Like transform_ast, but from file to file, and no return value.\"\"\"\n with open(in_filename, 'r') as in_file:\n in_source = in_file.read()\n \n out_source, manager = transform_source(in_source, nopts=nopts, qopts=qopts)\n \n eol = manager.options.get_opt('eol')\n eol = {'lf': '\\n', 'crlf': '\\r\\n', 'native': None}[eol]\n \n with open(out_filename, 'w', newline=eol) as out_file:\n out_file.write(out_source)\n \n return manager.stats\n" }, { "alpha_fraction": 0.43193715810775757, "alphanum_fraction": 0.47905758023262024, "avg_line_length": 21.47058868408203, "blob_id": "dfb0f4ca913fdfb977c554f1b32199623fd26ca8", "content_id": "11cc4d30aa0cb24bcb44f1d5bb6652577111aead", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 382, "license_type": "no_license", "max_line_length": 76, "num_lines": 17, "path": "/incoq/tests/programs/deminc/tup/basic_in.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "# Comprehensions with nested tuples, auxonly.\n\nfrom incoq.runtime import *\n\nQUERYOPTIONS(\n '{e for (a2, (b, c)) in R for (b2, (d, e)) in R if a2 == a if b2 == b}',\n impl = 'auxonly',\n)\n\nR = Set()\n\nfor x, y in [(1, (2, 3)), (2, (3, 4)), (3, (4, 5))]:\n R.add((x, y))\n\na = 1\nprint(sorted({e for (a2, (b, c)) in R for (b2, (d, e)) in R\n if a2 == a if b2 == b}))\n" }, { "alpha_fraction": 0.44603174924850464, "alphanum_fraction": 0.49365079402923584, "avg_line_length": 20, "blob_id": "34e5b94d75842fc44e5da7806d416c0abd197cf8", "content_id": "3f2c7de527854e2f8b018275ada4a332673537ce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 630, "license_type": "no_license", "max_line_length": 74, "num_lines": 30, "path": "/incoq/tests/programs/comp/parameter_in.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "# Comprehensions with parameters.\n\nfrom incoq.runtime import *\n\nE = Set()\n\nfor v1, v2 in {(1, 2), (2, 3), (2, 4), (4, 5)}:\n E.add((v1, v2))\n\nx = 1\ny = 5\nQUERYOPTIONS(\n '{z for (x2, y) in E for (y2, z) in E if x == x2 if y == y2}',\n params = ['x'],\n)\nprint(sorted({z for (x2, y) in E for (y2, z) in E if x == x2 if y == y2}))\n\nQUERYOPTIONS(\n '{y for (x2, y) in E if x == x2}',\n params = ['x'],\n impl = 'auxonly',\n)\nprint(sorted({y for (x2, y) in E if x == x2}))\n\nQUERYOPTIONS(\n '{(x, y) for (x, y2) in E if y == y2}',\n params = ['y'],\n impl = 'inc',\n)\nprint(sorted({(x, y) for (x, y2) in E if y == y2}))\n" }, { "alpha_fraction": 0.45787546038627625, "alphanum_fraction": 0.4908424913883209, 
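# Hedged sketch (illustrative, not a dataset record): in plain batch
# semantics, the first parameterized query in parameter_in.py above is just
# an ordinary comprehension closed over the parameter x. With the sample
# edges from that test program it yields {3, 4}:
E = {(1, 2), (2, 3), (2, 4), (4, 5)}
x = 1
result = {z for (x2, y) in E for (y2, z) in E if x == x2 if y == y2}
assert result == {3, 4}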
"avg_line_length": 15.117647171020508, "blob_id": "a78e282234c6aba83c8713b7297dd60304d1b0af", "content_id": "e09d62ad91bf1907f00ec3f86d1d7b759f7f38e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 273, "license_type": "no_license", "max_line_length": 31, "num_lines": 17, "path": "/incoq/tests/programs/objcomp/batch_out.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from incoq.runtime import *\nN = Set()\nfor i in range(1, 5):\n N._add(i)\ns1 = Set()\ns2 = Set()\nfor i in N:\n o = Obj()\n o.i = i\n if (i % 2):\n s1.add(o)\n else:\n s2.add(o)\ns = s1\nprint(sorted({o.i for o in s}))\ns = s2\nprint(sorted({o.i for o in s}))" }, { "alpha_fraction": 0.5707395672798157, "alphanum_fraction": 0.5712754726409912, "avg_line_length": 33.55555725097656, "blob_id": "5a35f337db9c24ce7c868cf640503453002ccfd0", "content_id": "ae8d39ae3c60f1dcf98e542ba98f7ffbbed90e41", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1866, "license_type": "no_license", "max_line_length": 78, "num_lines": 54, "path": "/incoq/compiler/__main__.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Entry point for invoking the system.\"\"\"\n\n\nimport sys\nimport argparse\n\nfrom . import transform_file, print_exc_with_ast \n\n\ndef boolval(s):\n if s.lower() == 'true':\n return True\n elif s.lower() == 'false':\n return False\n else:\n raise argparse.ArgumentTypeError(\"Expected 'true' or 'false'\")\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Incrementalize input_file, '\n 'writing to output_file.')\n \n parser.add_argument('input_file', help=argparse.SUPPRESS)\n parser.add_argument('output_file', help=argparse.SUPPRESS)\n \n general_group = parser.add_argument_group('general options')\n general_group.add_argument('-v', '--verbose', action='store_true',\n default=None,\n help='print transformation details')\n general_group.add_argument('--eol', choices=['native', 'lf', 'crlf'],\n default=None,\n help='end-of-line markers in output file')\n \n trans_group = parser.add_argument_group('transformation options')\n trans_group.add_argument('--obj_domain', type=boolval, metavar='<bool>',\n default=None,\n help='use object-domain flattening')\n \n args = parser.parse_args()\n \n # Arguments that are not provided get set to None by the arg parser.\n # These are not set in nopts so that the system defaults are applied.\n optkeys = ['verbose', 'eol', 'obj_domain']\n nopts = {k: getattr(args, k)\n for k in optkeys\n if getattr(args, k) is not None}\n \n try:\n transform_file(args.input_file, args.output_file, nopts=nopts)\n except Exception as exc:\n print_exc_with_ast()\n sys.exit(1)\n \n print('Done')\n" }, { "alpha_fraction": 0.4841075837612152, "alphanum_fraction": 0.5256723761558533, "avg_line_length": 19.450000762939453, "blob_id": "ddc85b20b691fc9f85143263c6ae8beee2221c84", "content_id": "30a3c5c3daff9960b84977c6eaf26d2b526dad72", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 409, "license_type": "no_license", "max_line_length": 74, "num_lines": 20, "path": "/incoq/tests/programs/deminc/reorder_in.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "# Reorder the clauses for generating the demand graph.\n\nfrom incoq.runtime import *\n\nQUERYOPTIONS(\n '{x for (x, y) in E for (y2, z2) in E if y == y2 if z == z2}',\n params = ['z'],\n impl = 'dem',\n uset_force = 
False,\n demand_reorder = [1, 0]\n)\n\nE = Set()\n\nfor a, b in {(1, 3), (2, 3), (3, 4)}:\n E.add((a, b))\n\nz = 4\n\nprint(sorted({x for (x, y) in E for (y2, z2) in E if y == y2 if z == z2}))\n" }, { "alpha_fraction": 0.4194815456867218, "alphanum_fraction": 0.4516889154911041, "avg_line_length": 20.576271057128906, "blob_id": "2c1d09f3fd23d70e04be778c980e3768fcdd3978", "content_id": "fbfd416d45ecc1cf01cab6ae9f39154430dd42bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1273, "license_type": "no_license", "max_line_length": 62, "num_lines": 59, "path": "/incoq/tests/util/test_planner.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Unit tests for planner.py.\"\"\"\n\n\nimport unittest\n\nfrom incoq.util.planner import *\n\n\nclass FactorState(State):\n \n def __init__(self, n, factors):\n self.factors = factors\n self.n = n\n \n def accepts(self):\n return True\n \n def get_answer(self):\n return tuple(sorted(self.factors))\n \n def successors(self):\n n = self.n\n states = []\n for i in range(2, n + 1):\n if n % i == 0:\n new_n = n // i\n new_factors = self.factors + (i,)\n states.append(FactorState(new_n, new_factors))\n return states\n\n\nclass PlannerCase(unittest.TestCase):\n \n def test(self):\n init = FactorState(100, ())\n planner = Planner()\n \n res = planner.get_answer(init)\n exp_res = (2, 2, 5, 5)\n self.assertEqual(res, exp_res)\n \n res = planner.get_all_answers(init)\n res = sorted(set(res))\n exp_res = [\n (2, 2, 5, 5),\n (2, 2, 25),\n (2, 5, 10),\n (2, 50),\n (4, 5, 5),\n (4, 25),\n (5, 20),\n (10, 10),\n (100,)\n ]\n self.assertEqual(res, exp_res)\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.418795645236969, "alphanum_fraction": 0.43380820751190186, "avg_line_length": 31.250965118408203, "blob_id": "09a473f5832a23fc79e5651603e4a2776fa4a881", "content_id": "b03306597f0a51a25f7dacd326a9425a79268103", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 41765, "license_type": "no_license", "max_line_length": 76, "num_lines": 1295, "path": "/experiments/twitter/run_twitter_exp.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Run twitter experiments.\"\"\"\n\n### TODO: Consider using a small-world graph generator\n### in place of gendb\n\n\nfrom random import sample, choice\nfrom itertools import product\nimport os\nimport sys\nimport importlib\n\nfrom frexp import (ExpWorkflow, Datagen,\n SimpleExtractor, MetricExtractor,\n TotalSizeExtractor, NormalizedExtractor,\n Printer)\n\nfrom experiments.twitter.gendb_wrapper import (\n gen_pairs, gen_pairs_with_inverse,\n steal_edges, move_edge)\n\nfrom experiments.util import (SmallExtractor, LargeExtractor,\n PosterExtractor, canonize)\n\n\nclass TwitterDatagen(Datagen):\n \n \"\"\"Create users and groups. Each user has a specified number of\n followers. Perform initial queries on each (user, pair) that\n we want to demand (treating that user as a celeb within the query).\n \"NYC\" will always be one of the possible locations.\n \n Then, perform queries interleaved with location updates,\n on random data elements.\n \n Space usage is recorded after all operations are done.\n \n Parameters:\n n_users, n_groups -- number of users and groups\n user_deg -- number of users each user follows\n pad_celeb -- a celeb-follower out-degree to pad celebs up\n to, by stealing followers from other users. 
If None, take\n celebs as-is.\n group_deg -- number of groups each user is a member of\n n_locs -- number of possible locations (uniform probability)\n n_q_celebs, n_q_groups -- number of demanded celebs and groups\n n_q_pairs -- number of demanded pairs, chosen from the cross-\n product of demanded celebs and groups\n n_u -- number of updates to do\n q_p_u -- number of queries to do before each update\n reps -- number of repeats of the generated timed operations\n need_exact -- if True, require that queried celebs have\n an out-degree of precisely user_deg\n upkind -- kind of updates to do, one of: 'loc', 'celeb'\n celebusertag -- only update users following a tagged celeb\n groupusertag -- only update users in a tagged group\n \"\"\"\n \n def genhelper(self, P):\n n_users, n_groups = P['n_users'], P['n_groups']\n user_deg = P['user_deg']\n pad_celeb = P['pad_celeb']\n group_deg = P['group_deg']\n n_q_celebs, n_q_groups = P['n_q_celebs'], P['n_q_groups']\n n_q_pairs = P['n_q_pairs']\n need_exact = P['need_exact']\n \n # (i, j) in R_follows means user i follows user j\n # (i, j) in R_memberof means user i is in group j\n \n # Normal benchmark: assign followers and group memberships\n # according to the degrees. Celebs are chosen from those\n # users with ideal out-degree first.\n req_inv = n_q_celebs if need_exact else None\n R_follows, Q_celebs = gen_pairs_with_inverse(\n range(n_users), range(n_users), user_deg,\n req_inv=req_inv, max_tries=10)\n Q_celebs = Q_celebs[:n_q_celebs]\n if pad_celeb is not None:\n # Invert the direction of R_follows to run steal_edges().\n R_follows = [(j, i) for i, j in R_follows]\n R_follows = steal_edges(range(n_users), R_follows,\n Q_celebs, pad_celeb)\n R_follows = [(j, i) for i, j in R_follows]\n R_memberof = gen_pairs(range(n_users), range(n_groups), group_deg)\n \n # Debug info.\n# from gendb_wrapper import print_pairinfo, print_deginfo\n# print_pairinfo(R_follows)\n# print_deginfo(R_follows, Q_celebs)\n \n Q_groups = sample(range(n_groups), n_q_groups)\n Q_pairs = sample(list(product(Q_celebs, Q_groups)), n_q_pairs)\n \n return R_follows, R_memberof, Q_pairs\n \n def generate(self, P):\n \"\"\"Return a pair of a dataset with all operations and\n a dataset with no queries in the timed part.\n \"\"\"\n n_users, n_groups = P['n_users'], P['n_groups']\n n_u = P['n_u']\n n_locs = P['n_locs']\n q_p_u = P['q_p_u']\n reps = P['reps']\n upkind = P['upkind']\n celebusertag = P['celebusertag']\n groupusertag = P['groupusertag']\n \n R_follows, R_memberof, Q_pairs = self.genhelper(P)\n \n # \"NYC\" is the one that satisfies the query.\n possible_locs = ['NYC'] + ['loc' + str(i) for i in range(1, n_locs)]\n # i -> loc in R_locs means user i has location loc.\n R_locs = {i: choice(possible_locs) for i in range(n_users)}\n \n # Figure out what users have what tags.\n Q_users = set(i for (i, _) in Q_pairs)\n Q_groups = set(j for (_, j) in Q_pairs)\n celebusertagged_users = set(i for (i, j) in R_follows\n if j in Q_users)\n groupusertagged_users = set(i for (i, j) in R_memberof\n if j in Q_groups)\n \n # Determine what users are fair game for location updating.\n valid_users = set(range(n_users))\n if celebusertag:\n valid_users &= celebusertagged_users\n if groupusertag:\n valid_users &= groupusertagged_users\n # Fail if we can't find a user satisfying the constraints.\n # FIXME: Change this so that we retry the whole dataset\n # generation procedure instead of raising an error.\n if len(valid_users) == 0:\n assert('Dataset generation failure: no valid users')\n \n # 
For the celeb update kind, store the current celeb-to-\n # follower relation, restricted to demanded celebs.\n Rcelebfollowers = [(y, x) for x, y in R_follows\n if y in Q_users]\n Rcelebfollowers_set = set(Rcelebfollowers)\n \n valid_users = list(valid_users)\n \n OPS_queries = []\n OPS_updates = []\n for _ in range(n_u):\n # Do some queries.\n ops = []\n for _ in range(q_p_u):\n c, g = choice(Q_pairs)\n ops.append((c, g))\n OPS_queries.append(ops)\n \n if upkind == 'loc':\n # Location update.\n i = choice(valid_users)\n \n loc = choice(possible_locs)\n OPS_updates.append((i, loc))\n elif upkind == 'celeb':\n # Following update.\n # Always chooses a queried celebrity.\n c, old_u, new_u = move_edge(\n Rcelebfollowers, Rcelebfollowers_set,\n range(n_users))\n OPS_updates.append((c, old_u, new_u))\n else:\n assert()\n \n \n return dict(\n dsparams = P,\n n_users = n_users,\n n_groups = n_groups,\n Q_pairs = Q_pairs,\n R_follows = R_follows,\n R_memberof = R_memberof,\n R_locs = R_locs,\n OPS_queries = OPS_queries,\n OPS_updates = OPS_updates,\n reps = reps,\n upkind = upkind,\n )\n \n # Set to [False, True] to run two versions, first with all\n # operations and then with updates only (no queries).\n noqs = [False]\n \n def get_tparams_list(self, dsparams_list):\n # Generate trialparams for versions with and without queries.\n return [dict(tid = dsp['dsid'] + '_' + str(noq),\n dsid = dsp['dsid'],\n prog = prog,\n noq = noq)\n for prog in self.progs\n for dsp in dsparams_list\n for noq in self.noqs\n ]\n\n\nclass TwitterDriver:\n \n # Twitter-specific driver. TODO: refactor into frexp.\n \n def __init__(self, pipe_filename):\n import gc\n import pickle\n \n gc.disable()\n \n with open(pipe_filename, 'rb') as pf:\n dataset, prog, other_tparams = pickle.load(pf)\n os.remove(pipe_filename)\n \n \n self.dataset = dataset\n self.prog = prog\n self.noq = other_tparams['noq']\n self.module = None\n self.results = {}\n self.reps = dataset['reps']\n self.upkind = dataset['upkind']\n \n self.setUp()\n \n from frexp.util import StopWatch, user_time\n from time import process_time, perf_counter\n timer_user = StopWatch(user_time)\n timer_cpu = StopWatch(process_time)\n timer_wall = StopWatch(perf_counter)\n \n with timer_user, timer_cpu, timer_wall:\n self.run_demand()\n \n self.results['demtime_user'] = timer_user.consume()\n self.results['demtime_cpu'] = timer_cpu.consume()\n self.results['demtime_wall'] = timer_wall.consume()\n \n with timer_user, timer_cpu, timer_wall:\n self.run_ops()\n \n import incoq.runtime\n self.results['size'] = incoq.runtime.get_total_structure_size(\n self.module.__dict__)\n self.results['opstime_user'] = timer_user.consume()\n self.results['opstime_cpu'] = timer_cpu.consume()\n self.results['opstime_wall'] = timer_wall.consume()\n \n # For the purpose of comparing standard deviation / mean,\n # don't worry about the time spent demanding things.\n self.results['stdmetric'] = self.results['opstime_cpu']\n \n self.tearDown()\n \n \n with open(pipe_filename, 'wb') as pf:\n pickle.dump(self.results, pf)\n \n def setUp(self):\n # Import driven program.\n dirname, filename = os.path.split(self.prog)\n if dirname:\n sys.path.append(dirname)\n try:\n self.module = importlib.import_module(\n 'experiments.twitter.' 
+ filename)\n finally:\n if dirname:\n sys.path.pop()\n \n \n m = self.module\n ds = self.dataset\n \n # Populate dataset.\n self.users = [m.make_user('e' + str(i), ds['R_locs'][i])\n for i in range(ds['n_users'])]\n self.groups = [m.make_group() for i in range(ds['n_groups'])]\n \n for i, j in ds['R_follows']:\n m.follow(self.users[i], self.users[j])\n for i, j in ds['R_memberof']:\n m.join_group(self.users[i], self.groups[j])\n \n # Preprocess operations.\n self.ops_queries = ds['OPS_queries']\n self.ops_updates = ds['OPS_updates']\n if self.noq:\n self.ops_queries = [[] for _ in range(len(self.ops_queries))]\n else:\n for qops in self.ops_queries:\n for i, (c, g) in enumerate(qops):\n qops[i] = (self.users[c], self.groups[g])\n if self.upkind == 'loc':\n for i, (u, loc) in enumerate(self.ops_updates):\n self.ops_updates[i] = (self.users[u], loc)\n elif self.upkind == 'celeb':\n for i, (c, old_u, new_u) in enumerate(self.ops_updates):\n self.ops_updates[i] = \\\n (self.users[c], self.users[old_u], self.users[new_u])\n else:\n assert()\n \n def run_demand(self):\n for c, g in self.dataset['Q_pairs']:\n self.module.do_query(self.users[c], self.groups[g])\n \n def run_ops(self):\n ops_queries = self.ops_queries\n ops_updates = self.ops_updates\n do_query = self.module.do_query_nodemand\n follow = self.module.follow\n unfollow = self.module.unfollow\n change_loc = self.module.change_loc\n \n assert len(ops_queries) == len(ops_updates)\n \n if self.upkind == 'loc':\n for _ in range(self.reps):\n for qops, (u, loc) in zip(ops_queries, ops_updates):\n for c, g in qops:\n do_query(c, g)\n change_loc(u, loc)\n elif self.upkind == 'celeb':\n for _ in range(self.reps):\n for qops, (c, old_u, new_u) in \\\n zip(ops_queries, ops_updates):\n for c2, g in qops:\n do_query(c2, g)\n unfollow(old_u, c)\n follow(new_u, c)\n else:\n assert()\n \n def tearDown(self):\n pass\n\nclass TwitterProfileDriver(TwitterDriver):\n \n def __init__(self, pipe_filename):\n import line_profiler\n import builtins\n self.prof = line_profiler.LineProfiler()\n builtins.__dict__['profile'] = self.prof\n super().__init__(pipe_filename)\n \n# def run_ops(self):\n# super().run_ops()\n \n def tearDown(self):\n print()\n self.prof.print_stats()\n super().tearDown()\n\nclass TwitterVerifyDriver(TwitterDriver):\n \n # Twitter-specific driver. 
TODO: refactor into frexp.\n \n condense_output = True\n \n def log_output(self, output):\n canon_value = canonize(output, use_hash=self.condense_output)\n self.results['output'].append(canon_value)\n \n def __init__(self, pipe_filename):\n import gc\n import pickle\n \n gc.disable()\n \n with open(pipe_filename, 'rb') as pf:\n dataset, prog, other_tparams = pickle.load(pf)\n os.remove(pipe_filename)\n \n \n self.dataset = dataset\n self.prog = prog\n self.noq = other_tparams['noq']\n self.module = None\n self.results = {}\n self.reps = dataset['reps']\n self.upkind = dataset['upkind']\n \n self.setUp()\n \n from frexp.util import StopWatch, user_time\n from time import process_time, perf_counter\n timer_user = StopWatch(user_time)\n timer_cpu = StopWatch(process_time)\n timer_wall = StopWatch(perf_counter)\n \n self.results['output'] = []\n \n with timer_user, timer_cpu, timer_wall:\n self.run_demand()\n \n with timer_user, timer_cpu, timer_wall:\n self.run_ops()\n \n self.tearDown()\n \n self.results['output'] = canonize(self.results['output'],\n use_hash=self.condense_output)\n \n with open(pipe_filename, 'wb') as pf:\n pickle.dump(self.results, pf)\n \n def run_ops(self):\n ops_queries = self.ops_queries\n ops_updates = self.ops_updates\n do_query = self.module.do_query_nodemand\n follow = self.module.follow\n unfollow = self.module.unfollow\n change_loc = self.module.change_loc\n \n assert len(ops_queries) == len(ops_updates)\n \n if self.upkind == 'loc':\n for _ in range(self.reps):\n for qops, (u, loc) in zip(ops_queries, ops_updates):\n for c, g in qops:\n output = do_query(c, g)\n self.log_output(output)\n change_loc(u, loc)\n elif self.upkind == 'celeb':\n for _ in range(self.reps):\n for qops, (c, old_u, new_u) in \\\n zip(ops_queries, ops_updates):\n for c2, g in qops:\n output = do_query(c2, g)\n self.log_output(output)\n unfollow(old_u, c)\n follow(new_u, c)\n else:\n assert()\n\n\nclass TwitterExtractor(SimpleExtractor, SmallExtractor):\n \n \"\"\"Extractor that distinguishes all-ops runs from\n no-queries runs.\n \"\"\"\n \n def scale(self, y, sid):\n # Hook for scaling after project_y() and the subtraction\n # have already run.\n return y\n \n # The series ids are pairs. The second component can be\n # one of 'all', 'updates', or 'queries'. 
The first two\n # refer to runs without and with the no-queries modifier.\n # The third refers to a virtual series computed by\n # subtracting the averages of updates from all.\n \n def get_series_points(self, datapoints, sid, *,\n average):\n inner_sid, kind = sid\n \n # Grab all data for the inner sid, and split based on noq.\n inner_data = self.get_series_data(datapoints, inner_sid)\n q_data = [p for p in inner_data\n if not p['noq']]\n noq_data = [p for p in inner_data\n if p['noq']]\n \n # For 'all' and 'updates', normal behavior on the q or\n # noq points.\n if kind == 'all':\n data = self.project_and_average_data(q_data, average=average)\n elif kind == 'updates':\n data = self.project_and_average_data(noq_data, average=average)\n elif kind == 'queries':\n q_avg = self.project_and_average_data(q_data, average=True)\n noq_avg = self.project_and_average_data(noq_data, average=True)\n \n data = []\n for ((ax, ay, _alo, _ahi), (ux, uy, _ulo, _uhi)) in \\\n zip(q_avg, noq_avg):\n assert ax == ux\n # Use 0 for errorbar data.\n data.append((ax, ay - uy, 0, 0))\n else:\n assert()\n \n for i, (x, y, lo, hi) in enumerate(data):\n data[i] = (x, self.scale(y, sid),\n self.scale(lo, sid), self.scale(hi, sid))\n \n return data\n\n\nclass TwitterWorkflow(ExpWorkflow):\n \n ExpDatagen = TwitterDatagen\n ExpExtractor = TwitterExtractor\n ExpDriver = TwitterDriver\n ExpVerifyDriver = TwitterVerifyDriver\n \n require_ac = False ###\n\n\nclass Scale(TwitterWorkflow):\n \n \"\"\"Increase number of users and degree of users and groups\n proportionally. Low demand.\n \"\"\"\n \n prefix = 'results/twitter_scale'\n \n class ExpDatagen(TwitterWorkflow.ExpDatagen):\n \n progs = [\n 'twitter_orig',\n 'twitter_inc',\n 'twitter_dem',\n ]\n \n noqs = [\n False,\n True,\n ]\n \n def get_dsparams_list(self):\n return [\n dict(\n dsid = str(x),\n x = 2000 * x,\n \n n_users = 2000 * x,\n n_groups = 20 * x,\n pad_celeb = None,\n \n user_deg = 10 * x,\n group_deg = 1 * x,\n \n n_locs = 20,\n \n n_q_celebs = 3,\n n_q_groups = 1,\n n_q_pairs = 3,\n \n n_u = 200000,\n q_p_u = 1,\n reps = 1,\n \n need_exact = True,\n upkind = 'loc',\n celebusertag = False,\n groupusertag = False,\n )\n for x in range(1, 10 + 1)\n ]\n \n stddev_window = .1\n min_repeats = 10\n max_repeats = 50\n \n class ExpExtractor(TwitterWorkflow.ExpExtractor):\n \n xlabel = 'Number of users (in thousands)'\n \n def project_x(self, p):\n return super().project_x(p) / 1e3\n \n xmin = 1\n xmax = 21\n x_ticklocs = [0, 4, 8, 12, 16, 20]\n\nclass ScaleTime(Scale):\n \n class ExpExtractor(Scale.ExpExtractor,\n MetricExtractor):\n \n series = [\n# (('twitter_orig', 'all'), 'original',\n# 'red', '-- s points'),\n# (('twitter_inc', 'all'), 'incremental',\n# 'blue', '-- o line'),\n# (('twitter_dem', 'all'), 'filtered',\n# 'green', '-- ^ line'),\n \n (('twitter_orig', 'queries'), 'original query',\n 'red', '1-2 s poly1'),\n (('twitter_orig', 'updates'), 'original update',\n 'red', '1-4 _s poly1'),\n (('twitter_inc', 'queries'), 'incremental query',\n 'blue', '1-2 o poly1'),\n (('twitter_inc', 'updates'), 'incremental update',\n 'blue', '1-4 _o poly1'),\n (('twitter_dem', 'queries'), 'filtered query',\n 'green', '1-2 ^ poly1'),\n (('twitter_dem', 'updates'), 'filtered update',\n 'green', '1-4 _^ poly1'),\n ]\n \n# def scale(self, y, sid):\n# if sid in [('twitter_orig', 'updates'),\n# ('twitter_inc', 'queries'),\n# ('twitter_dem', 'queries')]:\n# return y * 5\n# else:\n# return y\n \n @property\n def rcparams(self):\n return dict(super().rcparams,\n 
**{'legend.handletextpad': .4,\n 'legend.borderaxespad': .2})\n \n metric = 'opstime_cpu'\n \n ylabel = 'Running time (in seconds)'\n ymin = 0\n ymax = 3.5\n y_ticklocs = [0, 1, 2, 3, 4]\n \n imagename = 'time'\n\nclass ScaleTimePoster(ScaleTime):\n \n class ExpExtractor(PosterExtractor, ScaleTime.ExpExtractor):\n \n series = [\n (('twitter_orig', 'queries'), 'orig. query',\n 'red', '3-6 s poly1'),\n (('twitter_inc', 'queries'), 'incr. query',\n 'blue', '3-6 o poly1'),\n (('twitter_dem', 'queries'), 'dem. query',\n 'green', '3-6 ^ poly1'),\n (('twitter_orig', 'updates'), 'orig. update',\n 'red', '3-16 _s poly1'),\n (('twitter_inc', 'updates'), 'incr. update',\n 'blue', '3-16 _o poly1'),\n (('twitter_dem', 'updates'), 'dem. update',\n 'green', '3-16 _^ poly1'),\n ]\n \n xlabel = 'Users (thousands)'\n ylabel = 'Time (s)'\n \n figsize = (8, 7)\n tightlayout_bbox = (0, .25, 1, 1)\n legend_bbox = (0, -.45, 1, .20)\n legend_loc = 'upper center'\n legend_ncol = 2\n \n @property\n def rcparams(self):\n return dict(super().rcparams,\n **{'legend.borderaxespad': .2,\n 'legend.handlelength': 1.7})\n\nclass ScaleSize(Scale):\n \n class ExpExtractor(Scale.ExpExtractor,\n TotalSizeExtractor):\n \n series = [\n (('twitter_inc', 'all'), 'incremental',\n 'blue', '- o poly2'),\n (('twitter_dem', 'all'), 'filtered $\\\\times$ 100',\n 'green', '- ^ poly1'),\n ]\n \n demscale = 100\n \n max_xitvls = 5\n ylabel = 'Add\\'l space (in millions)'\n def project_y(self, p):\n y = super().project_y(p)\n # Scale dem separately from inc.\n if p['prog'] == 'twitter_dem':\n return y / 1e6 * self.demscale\n return y / 1e6\n \n imagename = 'size'\n\nclass ScaleSizePoster(ScaleSize):\n \n class ExpExtractor(PosterExtractor, ScaleSize.ExpExtractor):\n \n series = [\n (('twitter_inc', 'all'), 'incr.',\n 'blue', '- o poly2'),\n (('twitter_dem', 'all'), 'incr. 
w/ demand (x 10)',\n 'green', '- ^ poly1'),\n ]\n \n demscale = 10\n \n xlabel = 'Users (thousands)'\n ylabel = 'Add\\'l size (millions)'\n\n\nclass Demand(TwitterWorkflow):\n \n \"\"\"Increase number of demanded pairs.\"\"\"\n \n prefix = 'results/twitter_demand'\n \n class ExpDatagen(TwitterWorkflow.ExpDatagen):\n \n progs = [\n 'twitter_inc',\n 'twitter_dem',\n ]\n \n def get_dsparams_list(self):\n return [\n dict(\n dsid = str(x),\n x = x,\n \n n_users = 20000,\n n_groups = 200,\n pad_celeb = None,\n \n user_deg = 100,\n group_deg = 10,\n \n n_locs = 20,\n \n n_q_celebs = x,\n n_q_groups = 1,\n n_q_pairs = x,\n \n n_u = 200000,\n q_p_u = 1,\n reps = 1,\n \n need_exact = False,\n upkind = 'loc',\n celebusertag = True,\n groupusertag = True,\n )\n for x in [1] + list(range(2000, 20001, 2000))\n ]\n \n stddev_window = .1\n min_repeats = 10\n max_repeats = 50\n \n class ExpExtractor(TwitterWorkflow.ExpExtractor):\n \n series = [\n (('twitter_inc', 'all'), 'incremental',\n 'blue', '- o poly1'),\n (('twitter_dem', 'all'), 'filtered',\n 'green', '- ^ poly1'),\n ]\n \n# xlabel = 'Number of users in \\\\texttt{U} (in thousands)'\n # For Annie's writing, use \"demand\" instead of U.\n xlabel = 'Number of users in \\\\texttt{demand} (in thousands)'\n \n def project_x(self, p):\n return super().project_x(p) / 1e3\n \n xmin = -1\n xmax = 21\n x_ticklocs = [0, 4, 8, 12, 16, 20]\n\nclass DemandTime(Demand):\n \n class ExpExtractor(Demand.ExpExtractor,\n MetricExtractor):\n \n ymin = 0\n y_ticklocs = [0, 2, 4, 6, 8]\n\nclass DemandTimeOps(DemandTime):\n \n class ExpExtractor(DemandTime.ExpExtractor):\n \n metric = 'opstime_cpu'\n ylabel = 'Running time (in seconds)'\n \n imagename = 'time'\n\nclass DemandTimeDem(DemandTime):\n \n class ExpExtractor(DemandTime.ExpExtractor):\n metric = 'demtime_cpu'\n ylabel = 'Demand time (in seconds)'\n# y_ticklocs = [5, 10, 15, 20, 25, 30]\n \n imagename = 'demtime'\n\nclass DemandTimeTotal(DemandTime):\n \n class ExpExtractor(DemandTime.ExpExtractor):\n ylabel = 'Total time (in seconds)'\n# y_ticklocs = [5, 10, 15, 20, 25, 30]\n def project_y(self, p):\n return p['results']['demtime_cpu'] + p['results']['opstime_cpu']\n \n imagename = 'time_plusdemand'\n\nclass DemandSize(Demand):\n \n class ExpExtractor(Demand.ExpExtractor,\n TotalSizeExtractor):\n \n ylabel = 'Add\\'l space (in millions)'\n \n def project_y(self, p):\n return super().project_y(p) / 1e6\n \n y_ticklocs = [0, 1, 2, 3, 4]\n \n imagename = 'size'\n\n\nclass Factor(TwitterWorkflow):\n \n \"\"\"Do queries and following updates on a fixed dataset.\n \n Eight variants ({A, B, C, D} x {1, 2}):\n \n 1A) 20,000 users as in rightmost datapoint of Scale\n 1B) 20,000 users as in rightmost datapoint of Demand\n 1C) 10,000 users as in middle datapoint of Demand\n except each user follows 2% (200) of all users\n 1D) 2,000 users as in leftmost datapoint of Demand\n except each user follows 50% (1,000) of all users\n \n Note that A-D have the same number of groups and group\n degree, and B-D have 2 million following pairs.\n \n 2A through 2D are the same as their counterparts except\n that we update celebrity following relationships instead\n of user locations. 
The updated celebrity is always a\n demanded one.\n \"\"\"\n \n class ExpDatagen(TwitterWorkflow.ExpDatagen):\n \n progs = [\n 'twitter_dem',\n \n# 'twitter_dem_aug',\n# 'twitter_dem_das',\n \n# 'twitter_dem_noninline',\n 'twitter_dem_norcelim',\n 'twitter_dem_notypecheck',\n# 'twitter_dem_handopt',\n# 'twitter_dem_noalias',\n# 'twitter_dem_notypecheck_noalias',\n ]\n \n stddev_window = .1\n min_repeats = 10\n max_repeats = 50\n \n class ExpExtractor(TwitterWorkflow.ExpExtractor):\n \n xlabel = 'x'\n \n class ExpViewer(Printer):\n \n transpose = True\n \n def round_y(self, y):\n return round(y, 3)\n\nclass Factor1A(Factor):\n \n prefix = 'results/twitter_factor1a'\n \n class ExpDatagen(Factor.ExpDatagen):\n \n def get_dsparams_list(self):\n return [\n dict(\n dsid = str(x),\n x = x,\n \n n_users = 20000,\n n_groups = 200,\n pad_celeb = None,\n \n user_deg = 100,\n group_deg = 10,\n \n n_locs = 20,\n \n n_q_celebs = 3,\n n_q_groups = 1,\n n_q_pairs = 3,\n \n n_u = 200000,\n q_p_u = 1,\n reps = 1,\n \n need_exact = True,\n upkind = 'loc',\n celebusertag = False,\n groupusertag = False,\n )\n for x in [1]\n ]\n\nclass Factor1B(Factor):\n \n prefix = 'results/twitter_factor1b'\n \n class ExpDatagen(Factor.ExpDatagen):\n \n def get_dsparams_list(self):\n return [\n dict(\n dsid = str(x),\n x = x,\n \n n_users = 20000,\n n_groups = 200,\n pad_celeb = None,\n \n user_deg = 100,\n group_deg = 10,\n \n n_locs = 20,\n \n n_q_celebs = 20000,\n n_q_groups = 1,\n n_q_pairs = 20000,\n \n n_u = 200000,\n q_p_u = 1,\n reps = 1,\n \n need_exact = False,\n upkind = 'loc',\n celebusertag = True,\n groupusertag = True,\n )\n for x in [1]\n ]\n\nclass Factor1C(Factor):\n \n prefix = 'results/twitter_factor1c'\n \n class ExpDatagen(Factor.ExpDatagen):\n \n def get_dsparams_list(self):\n return [\n dict(\n dsid = str(x),\n x = x,\n \n n_users = 10000,\n n_groups = 200,\n pad_celeb = None,\n \n user_deg = 200,\n group_deg = 10,\n \n n_locs = 20,\n \n n_q_celebs = 10000,\n n_q_groups = 1,\n n_q_pairs = 10000,\n \n n_u = 200000,\n q_p_u = 1,\n reps = 1,\n \n need_exact = False,\n upkind = 'loc',\n celebusertag = True,\n groupusertag = True,\n )\n for x in [1]\n ]\n\nclass Factor1D(Factor):\n \n prefix = 'results/twitter_factor1d'\n \n class ExpDatagen(Factor.ExpDatagen):\n \n def get_dsparams_list(self):\n return [\n dict(\n dsid = str(x),\n x = x,\n \n n_users = 2000,\n n_groups = 200,\n pad_celeb = None,\n \n user_deg = 1000,\n group_deg = 10,\n \n n_locs = 20,\n \n n_q_celebs = 2000,\n n_q_groups = 1,\n n_q_pairs = 2000,\n \n n_u = 200000,\n q_p_u = 1,\n reps = 1,\n \n need_exact = False,\n upkind = 'loc',\n celebusertag = True,\n groupusertag = True,\n )\n for x in [1]\n ]\n\nclass Factor2A(Factor):\n \n prefix = 'results/twitter_factor2a'\n \n class ExpDatagen(Factor.ExpDatagen):\n \n def get_dsparams_list(self):\n return [\n dict(\n dsid = str(x),\n x = x,\n \n n_users = 20000,\n n_groups = 200,\n pad_celeb = None,\n \n user_deg = 100,\n group_deg = 10,\n \n n_locs = 20,\n \n n_q_celebs = 3,\n n_q_groups = 1,\n n_q_pairs = 3,\n \n n_u = 200000,\n q_p_u = 1,\n reps = 1,\n \n need_exact = True,\n upkind = 'celeb',\n celebusertag = False,\n groupusertag = False,\n )\n for x in [1]\n ]\n\nclass Factor2B(Factor):\n \n prefix = 'results/twitter_factor2b'\n \n class ExpDatagen(Factor.ExpDatagen):\n \n def get_dsparams_list(self):\n return [\n dict(\n dsid = str(x),\n x = x,\n \n n_users = 20000,\n n_groups = 200,\n pad_celeb = None,\n \n user_deg = 100,\n group_deg = 10,\n \n n_locs = 20,\n \n n_q_celebs = 20000,\n 
n_q_groups = 1,\n n_q_pairs = 20000,\n \n n_u = 200000,\n q_p_u = 1,\n reps = 1,\n \n need_exact = False,\n upkind = 'celeb',\n celebusertag = True,\n groupusertag = True,\n )\n for x in [1]\n ]\n\nclass Factor2C(Factor):\n \n prefix = 'results/twitter_factor2c'\n \n class ExpDatagen(Factor.ExpDatagen):\n \n def get_dsparams_list(self):\n return [\n dict(\n dsid = str(x),\n x = x,\n \n n_users = 10000,\n n_groups = 200,\n pad_celeb = None,\n \n user_deg = 200,\n group_deg = 10,\n \n n_locs = 20,\n \n n_q_celebs = 10000,\n n_q_groups = 1,\n n_q_pairs = 10000,\n \n n_u = 200000,\n q_p_u = 1,\n reps = 1,\n \n need_exact = False,\n upkind = 'celeb',\n celebusertag = True,\n groupusertag = True,\n )\n for x in [1]\n ]\n\nclass Factor2D(Factor):\n \n prefix = 'results/twitter_factor2d'\n \n class ExpDatagen(Factor.ExpDatagen):\n \n def get_dsparams_list(self):\n return [\n dict(\n dsid = str(x),\n x = x,\n \n n_users = 2000,\n n_groups = 200,\n pad_celeb = None,\n \n user_deg = 1000,\n group_deg = 10,\n \n n_locs = 20,\n \n n_q_celebs = 2000,\n n_q_groups = 1,\n n_q_pairs = 2000,\n \n n_u = 200000,\n q_p_u = 1,\n reps = 1,\n \n need_exact = False,\n upkind = 'celeb',\n celebusertag = True,\n groupusertag = True,\n )\n for x in [1]\n ]\n\nclass FactorTime(Factor):\n \n class ExpExtractor(Factor.ExpExtractor,\n MetricExtractor):\n \n series = [\n# (('twitter_dem', 'all'), 'filtered (normal)',\n# 'green', '-- ^ normal'),\n \n# (('twitter_dem_aug', 'all'), 'augmented',\n# 'blue', '-- o normal'),\n# (('twitter_dem_das', 'all'), 'das.',\n# 'cyan', '-- o normal'),\n \n# (('twitter_dem_noninline', 'all'), 'non-inlined',\n# 'orange', '-- s normal'),\n (('twitter_dem_norcelim', 'all'), 'no RC elim',\n 'red', '-- s normal'),\n (('twitter_dem_notypecheck', 'all'), 'no type checks',\n 'yellow', '-- s normal'),\n# (('twitter_dem_handopt', 'all'), 'hand-optimized',\n# 'fuchsia', '-- s normal'),\n# (('twitter_dem_noalias', 'all'), 'alias-optimized',\n# 'purple', '-- s normal'),\n# (('twitter_dem_notypecheck_noalias', 'all'),\n# 'no type checks + alias-opt',\n# 'lightpurple', '-- s normal'),\n ]\n \n metric = 'opstime_cpu'\n \n ylabel = 'Running time (in seconds)'\n ymin = 0\n \n imagename = 'time'\n\nclass FactorTimeNorm(FactorTime):\n \n class ExpExtractor(NormalizedExtractor,\n FactorTime.ExpExtractor):\n \n base_sid = ('twitter_dem', 'all')\n \n def normalize(self, pre_y, base_y):\n return pre_y / base_y\n\nclass Factor1ATimeNorm(Factor1A, FactorTimeNorm):\n pass\nclass Factor1BTimeNorm(Factor1B, FactorTimeNorm):\n pass\nclass Factor1CTimeNorm(Factor1C, FactorTimeNorm):\n pass\nclass Factor1DTimeNorm(Factor1D, FactorTimeNorm):\n pass\nclass Factor2ATimeNorm(Factor2A, FactorTimeNorm):\n pass\nclass Factor2BTimeNorm(Factor2B, FactorTimeNorm):\n pass\nclass Factor2CTimeNorm(Factor2C, FactorTimeNorm):\n pass\nclass Factor2DTimeNorm(Factor2D, FactorTimeNorm):\n pass\n\n\nclass Tag(TwitterWorkflow):\n \n prefix = 'results/twitter_tag'\n \n class ExpDatagen(TwitterWorkflow.ExpDatagen):\n \n progs = [\n 'twitter_dem',\n 'twitter_dem_singletag',\n ]\n \n def get_dsparams_list(self):\n return [\n dict(\n dsid = str(x),\n x = 2000 * x,\n \n n_users = 2000 * x,\n n_groups = 20 * x,\n pad_celeb = None,\n \n user_deg = 10 * x,\n group_deg = 5,\n \n n_locs = 20,\n \n n_q_celebs = 2000 * x,\n n_q_groups = 1,\n n_q_pairs = 2000 * x,\n \n n_u = 200000,\n q_p_u = 1,\n reps = 1,\n \n need_exact = False,\n upkind = 'loc',\n celebusertag = False,\n groupusertag = False,\n )\n for x in range(1, 10 + 1)\n ]\n \n stddev_window = 
.1\n min_repeats = 10\n max_repeats = 50\n \n class ExpExtractor(Scale.ExpExtractor):\n pass\n\nclass TagTime(Tag):\n \n class ExpExtractor(Tag.ExpExtractor,\n MetricExtractor):\n \n series = [\n (('twitter_dem_singletag', 'all'), 'OSQ strategy',\n 'orange', '-- ^ poly1'),\n (('twitter_dem', 'all'), 'filtered',\n 'green', '- ^ poly1'),\n ]\n \n metric = 'opstime_cpu'\n \n ylabel = 'Running time (in seconds)'\n ymin = 0\n y_ticklocs = [0, 1, 2, 3, 4, 5]\n \n imagename = 'time'\n" }, { "alpha_fraction": 0.5234804749488831, "alphanum_fraction": 0.5261504650115967, "avg_line_length": 30.834999084472656, "blob_id": "3000dd485a129857a6251b659d7409a58447828f", "content_id": "c60b158e6cbaf9155f25486bf19304663d8e7ca7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6367, "license_type": "no_license", "max_line_length": 64, "num_lines": 200, "path": "/incoq/tests/invinc/incast/test_helpers.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Unit tests for helpers.py.\"\"\"\n\n\nimport unittest\n\nfrom incoq.compiler.incast.nodes import *\nfrom incoq.compiler.incast.structconv import parse_structast\nfrom incoq.compiler.incast.helpers import *\n\n\nclass HelpersCase(unittest.TestCase):\n \n def p(self, source, mode=None, **kargs):\n return parse_structast(source, mode=mode, **kargs)\n \n def pc(self, source, **kargs):\n return self.p(source, mode='code', **kargs)\n \n def ps(self, source, **kargs):\n return self.p(source, mode='stmt', **kargs)\n \n def pe(self, source, **kargs):\n return self.p(source, mode='expr', **kargs)\n \n def test_ln(self):\n node = ln('foo')\n exp_node = Name('foo', Load())\n self.assertEqual(node, exp_node)\n \n def test_tuplify(self):\n tree = tuplify(['foo', 'bar', Num(n=0)])\n exp_tree = self.pe('(foo, bar, 0)')\n self.assertEqual(tree, exp_tree)\n \n tree = tuplify(['foo'])\n exp_tree = ln('foo')\n self.assertEqual(tree, exp_tree)\n \n tree = tuplify([])\n exp_tree = Tuple((), Load())\n self.assertEqual(tree, exp_tree)\n \n tree = tuplify(['foo', 'bar', Num(n=0)], lval=True)\n exp_tree = self.p('(foo, bar, 0)', mode='lval')\n self.assertEqual(tree, exp_tree)\n \n tree = tuplify([], lval=True)\n exp_tree = sn('_')\n self.assertEqual(tree, exp_tree)\n \n def test_cmp(self):\n tree = self.pe('a in b')\n exp_tree = cmpin(ln('a'), ln('b'))\n self.assertEqual(tree, exp_tree)\n \n def test_plainfunc(self):\n func = self.ps('def foo(x): pass')\n self.assertTrue(is_plainfuncdef(func))\n \n func2 = plainfuncdef('foo', ['x'], self.pc('pass'))[0]\n self.assertEqual(func, func2)\n \n res = get_plainfuncdef(func2)\n self.assertEqual(res, ('foo', ('x',), self.pc('pass')))\n \n func = self.ps('def foo(x:y): pass')\n self.assertFalse(is_plainfuncdef(func))\n \n def test_varassign(self):\n tree = self.p('a = b + c', mode='stmt')\n id, val = get_varassign(tree)\n self.assertEqual(id, 'a')\n self.assertEqual(val, self.pe('b + c'))\n \n # Try failure.\n tree = self.p('a + b', mode='stmt')\n with self.assertRaises(TypeError):\n get_varassign(tree)\n self.assertFalse(is_varassign(tree))\n \n def test_vartuple(self):\n tree = self.pe('(a, b)')\n res = get_vartuple(tree)\n self.assertEqual(res, ('a', 'b'))\n \n tree = self.pe('a')\n res = get_vartuple(tree)\n self.assertEqual(res, ('a',))\n \n def test_is(self):\n self.assertTrue(is_vartuple(self.pe('(a, b)')))\n self.assertFalse(is_vartuple(self.pe('(a + b, c)')))\n \n def test_name(self):\n tree = self.pe('a')\n res = get_name(tree)\n self.assertEqual(res, 'a')\n 
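\n    # NOTE (added; an observation, not part of the original suite): the\n    # helpers under test come in get_X/is_X pairs -- get_X destructures a\n    # node and raises TypeError on a mismatch (see test_varassign above),\n    # while is_X is the corresponding boolean check.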
\n def test_pat_cmp(self):\n tree = self.pe('a < b')\n left, op, right = get_cmp(tree)\n self.assertEqual(left, self.pe('a'))\n self.assertIsInstance(op, Lt)\n self.assertEqual(right, self.pe('b'))\n \n def test_vareqcmp(self):\n tree = self.pe('a == b == c')\n vars = get_vareqcmp(tree)\n self.assertEqual(vars, ('a', 'b', 'c'))\n \n def test_singletonset(self):\n tree = self.pe('{1}')\n res = get_singletonset(tree)\n self.assertEqual(res, self.pe('1'))\n \n def test_singadd(self):\n tree = self.pe('R + {e}')\n left, right = get_singadd(tree)\n self.assertEqual(left, self.pe('R'))\n self.assertEqual(right, self.pe('e'))\n \n def test_singsub(self):\n tree = self.pe('R - {e}')\n left, right = get_singsub(tree)\n self.assertEqual(left, self.pe('R'))\n self.assertEqual(right, self.pe('e'))\n \n def test_namematch(self):\n tree = SetMatch(self.pe('R'), 'bu', self.pe('a'))\n id, mask, key = get_namematch(tree)\n self.assertEqual(id, 'R')\n self.assertEqual(mask, 'bu')\n self.assertEqual(key, self.pe('a'))\n \n def test_namesmlookup(self):\n tree = SMLookup(self.pe('R'), 'bu', self.pe('a'), None)\n id, mask, key = get_namesmlookup(tree)\n self.assertEqual(id, 'R')\n self.assertEqual(mask, 'bu')\n self.assertEqual(key, self.pe('a'))\n \n def test_attrassign(self):\n tree = self.ps('a.b.c = 5')\n rec, attr, val = get_attrassign(tree)\n self.assertEqual(rec, self.pe('a.b'))\n self.assertEqual(attr, 'c')\n self.assertEqual(val, self.pe('5'))\n \n def test_delattr(self):\n tree = self.ps('del a.b.c')\n rec, attr = get_delattr(tree)\n self.assertEqual(rec, self.pe('a.b'))\n self.assertEqual(attr, 'c')\n \n def test_mapassign(self):\n tree = self.ps('a[b.c] = 5')\n map, key, val = get_mapassign(tree)\n self.assertEqual(map, self.pe('a'))\n self.assertEqual(key, self.pe('b.c'))\n self.assertEqual(val, self.pe('5'))\n \n tree = self.ps('globals()[a] = 5')\n self.assertFalse(is_mapassign(tree))\n \n def test_delmap(self):\n tree = self.ps('del a[b.c]')\n map, key = get_delmap(tree)\n self.assertEqual(map, self.pe('a'))\n self.assertEqual(key, self.pe('b.c'))\n \n tree = self.ps('del globals()[a]')\n self.assertFalse(is_delmap(tree))\n \n def test_importstar(self):\n tree = self.ps('from foo import *')\n mod = get_importstar(tree)\n self.assertEqual(mod, 'foo')\n \n def test_setunion(self):\n comp = Comp(Name('b', Load()),\n (Enumerator(Name('b', Store()),\n Name('S', Load())),),\n (), {})\n tree = self.pe('{a} | COMP | S', subst={'COMP': comp})\n parts = get_setunion(tree)\n exp_parts = (self.pe('{a}'), comp, self.pe('S'))\n self.assertEqual(parts, exp_parts)\n \n tree = self.pe('{1} | 2')\n self.assertFalse(is_setunion(tree))\n \n def test_simplecall(self):\n tree = self.pe('f(a, b + c)')\n func, args = get_plaincall(tree)\n self.assertEqual(func, 'f')\n self.assertEqual(args, (self.pe('a'), self.pe('b + c')))\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.44726642966270447, "alphanum_fraction": 0.4554939568042755, "avg_line_length": 29.551912307739258, "blob_id": "ea95e89f077311e6c80df62bfc6ccf87f292dc1b", "content_id": "ea6ab8fd79df82c1c2312ddf64c4b2a6816cbaaf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16773, "license_type": "no_license", "max_line_length": 75, "num_lines": 549, "path": "/experiments/rbac/corerbac/run_corerbac_exp.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Run the Core RBAC experiment.\"\"\"\n\n\nimport os\nimport sys\nimport importlib\nfrom copy import 
deepcopy\nfrom random import sample, randrange\n\nfrom frexp import (ExpWorkflow, Datagen, MetricExtractor, ScaledExtractor,\n NormalizedExtractor)\n\nfrom experiments.util import SmallExtractor, LargeExtractor, canonize\n\n\nclass CoreRBACDatagen(Datagen):\n \n \"\"\"Generate structures and relate them according to a fixed\n proportion. Perform a series of operation patterns consisting\n of creating several sessions, performing checkaccess queries,\n and then deleting these sessions.\n \n Parameters:\n n_users -- number of users\n n_roles -- number of roles\n n_ops -- number of operations\n n_objs -- number of objects\n rpu -- roles per user\n ppr -- permissions per role\n rps -- active roles per session\n q_objs -- number of queryable objects\n n_pat -- number of patterns\n s_pat -- number of sessions created per pattern\n qs_pat -- number of queried sessions per pattern\n q_pat -- number of queries per session pattern\n \"\"\"\n \n def generate(self, P):\n n_users = P['n_users']\n n_roles = P['n_roles']\n n_ops = P['n_ops']\n n_objs = P['n_objs']\n rpu = P['rpu']\n ppr = P['ppr']\n rps = P['rps']\n q_objs = P['q_objs']\n n_pat = P['n_pat']\n s_pat = P['s_pat']\n qs_pat = P['qs_pat']\n q_pat = P['q_pat']\n \n perms = [(i, j) for i in range(n_ops)\n for j in range(n_objs)]\n \n UR = {i: sample(range(n_roles), rpu)\n for i in range(n_users)}\n PR = {i: sample(perms, ppr)\n for i in range(n_roles)}\n \n # Element format:\n # [([(u, s, ars), ...], [(s, op, obj), ...]),\n # ...\n # ]\n OPS = []\n for _ in range(n_pat):\n SES = []\n for s in range(s_pat):\n u = randrange(n_users)\n ars = sample(UR[u], rps)\n SES.append((u, s, ars))\n \n CA = [(randrange(qs_pat), randrange(n_ops), randrange(q_objs))\n for _ in range(q_pat)] \n \n OPS.append((SES, CA))\n \n return dict(\n dsparams = P,\n UR = UR,\n PR = PR,\n OPS = OPS,\n )\n \n # Set to [False, True] to run two versions, first with all\n # operations and then with updates only (no queries).\n noqs = [False]\n \n def get_tparams_list(self, dsparams_list):\n return [dict(tid = dsp['dsid'] + '_' + str(noq),\n dsid = dsp['dsid'],\n prog = prog,\n noq = noq)\n for prog in self.progs\n for dsp in dsparams_list\n for noq in self.noqs\n ]\n\n\nclass CoreRBACDriver:\n \n def __init__(self, pipe_filename):\n import gc\n import pickle\n \n gc.disable()\n \n with open(pipe_filename, 'rb') as pf:\n dataset, prog, other_tparams = pickle.load(pf)\n os.remove(pipe_filename)\n \n \n self.dataset = dataset\n self.prog = prog\n self.noq = other_tparams['noq']\n self.module = None\n self.results = {}\n \n self.setUp()\n \n from frexp.util import StopWatch, user_time\n from time import process_time, perf_counter\n timer_user = StopWatch(user_time)\n timer_cpu = StopWatch(process_time)\n timer_wall = StopWatch(perf_counter)\n \n with timer_user, timer_cpu, timer_wall:\n self.run()\n \n import incoq.runtime\n self.results['size'] = incoq.runtime.get_total_structure_size(\n self.module.__dict__)\n self.results['time_user'] = timer_user.consume()\n self.results['time_cpu'] = timer_cpu.consume()\n self.results['time_wall'] = timer_wall.consume()\n \n self.results['stdmetric'] = self.results['time_cpu']\n \n \n with open(pipe_filename, 'wb') as pf:\n pickle.dump(self.results, pf)\n \n def setUp(self):\n # Import driven program.\n dirname, filename = os.path.split(self.prog)\n if dirname:\n sys.path.append(dirname)\n try:\n self.module = importlib.import_module(\n 'experiments.rbac.corerbac.' 
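# (note) dirname was appended to sys.path above so the driven\n            # program can be found; the 'finally' below pops it off again.\n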
+ filename)\n finally:\n if dirname:\n sys.path.pop()\n \n \n m = self.module\n ds = self.dataset\n P = ds['dsparams']\n \n def u(i):\n return 'u' + str(i)\n def r(i):\n return 'r' + str(i)\n def op(i):\n return 'op' + str(i)\n def obj(i):\n return 'obj' + str(i)\n def s(i):\n return 's' + str(i)\n \n # Initialize dataset.\n for i in range(P['n_users']):\n m.AddUser(u(i))\n for i in range(P['n_roles']):\n m.AddRole(r(i))\n for i in range(P['n_ops']):\n m.AddOperation(op(i))\n for i in range(P['n_objs']):\n m.AddObject(obj(i))\n for i, rs in ds['UR'].items():\n for j in rs:\n m.AssignUser(u(i), r(j))\n for i, ps in ds['PR'].items():\n for (j, k) in ps:\n m.GrantPermission(op(j), obj(k), r(i))\n \n # Preprocess operations.\n self.OPS = []\n for SES, CA in ds['OPS']:\n N_SES = [(u(i), s(j), {r(k) for k in ars})\n for i, j, ars in SES]\n if self.noq:\n N_CA = []\n else:\n N_CA = [(s(i), op(j), obj(k))\n for i, j, k in CA]\n self.OPS.append((N_SES, N_CA))\n \n # Do initial demand for all combinations of queried\n # sessions and objects. Since CheckAccess has membership\n # preconditions, we need to actually create these sessions\n # first, and we'll destroy them before actually starting\n # the test proper.\n qs_pat = self.dataset['dsparams']['qs_pat']\n q_objs = self.dataset['dsparams']['q_objs']\n queried_sessions = ['s' + str(i) for i in range(qs_pat)]\n queried_objects = ['obj' + str(i) for i in range(q_objs)]\n for s in queried_sessions:\n m.CreateSession('u0', s, set())\n for obj in queried_objects:\n m.CheckAccess(s, 'op0', obj)\n m.DeleteSession('u0', s)\n \n def run(self):\n CreateSession = self.module.CreateSession\n DeleteSession = self.module.DeleteSession\n CheckAccess_nodemand = self.module.CheckAccess_nodemand\n \n for SES, CA in self.OPS:\n for u, s, ars in SES:\n CreateSession(u, s, ars)\n for s, op, obj in CA:\n CheckAccess_nodemand(s, op, obj)\n for u, s, _ars in SES:\n DeleteSession(u, s)\n\nclass CoreRBACVerifyDriver:\n \n def __init__(self, pipe_filename):\n import gc\n import pickle\n \n gc.disable()\n \n with open(pipe_filename, 'rb') as pf:\n dataset, prog, other_tparams = pickle.load(pf)\n os.remove(pipe_filename)\n \n \n self.dataset = dataset\n self.prog = prog\n self.noq = other_tparams['noq']\n self.module = None\n self.results = {'output': []}\n \n self.setUp()\n \n from frexp.util import StopWatch, user_time\n from time import process_time, perf_counter\n timer_user = StopWatch(user_time)\n timer_cpu = StopWatch(process_time)\n timer_wall = StopWatch(perf_counter)\n \n with timer_user, timer_cpu, timer_wall:\n self.run()\n \n \n self.results = canonize(self.results)\n \n with open(pipe_filename, 'wb') as pf:\n pickle.dump(self.results, pf)\n \n def setUp(self):\n # Import driven program.\n dirname, filename = os.path.split(self.prog)\n if dirname:\n sys.path.append(dirname)\n try:\n self.module = importlib.import_module(\n 'experiments.rbac.corerbac.' 
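# (note) this setUp mirrors CoreRBACDriver.setUp above; the verify\n            # driver differs mainly in recording CheckAccess outputs in run().\n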
+ filename)\n finally:\n if dirname:\n sys.path.pop()\n \n \n m = self.module\n ds = self.dataset\n P = ds['dsparams']\n \n def u(i):\n return 'u' + str(i)\n def r(i):\n return 'r' + str(i)\n def op(i):\n return 'op' + str(i)\n def obj(i):\n return 'obj' + str(i)\n def s(i):\n return 's' + str(i)\n \n # Initialize dataset.\n for i in range(P['n_users']):\n m.AddUser(u(i))\n for i in range(P['n_roles']):\n m.AddRole(r(i))\n for i in range(P['n_ops']):\n m.AddOperation(op(i))\n for i in range(P['n_objs']):\n m.AddObject(obj(i))\n for i, rs in ds['UR'].items():\n for j in rs:\n m.AssignUser(u(i), r(j))\n for i, ps in ds['PR'].items():\n for (j, k) in ps:\n m.GrantPermission(op(j), obj(k), r(i))\n \n # Preprocess operations.\n self.OPS = []\n for SES, CA in ds['OPS']:\n N_SES = [(u(i), s(j), {r(k) for k in ars})\n for i, j, ars in SES]\n if self.noq:\n N_CA = []\n else:\n N_CA = [(s(i), op(j), obj(k))\n for i, j, k in CA]\n self.OPS.append((N_SES, N_CA))\n \n def run(self):\n CreateSession = self.module.CreateSession\n DeleteSession = self.module.DeleteSession\n CheckAccess = self.module.CheckAccess\n \n for SES, CA in self.OPS:\n for u, s, ars in SES:\n CreateSession(u, s, ars)\n for s, op, obj in CA:\n output = CheckAccess(s, op, obj)\n self.results['output'].append(deepcopy(output))\n for u, s, _ars in SES:\n DeleteSession(u, s)\n\n\nclass CoreRBACExtractor(MetricExtractor, SmallExtractor):\n \n def get_series_points(self, datapoints, sid, *,\n average):\n inner_sid, kind = sid\n \n # Grab all data for the inner sid, and split based on noq.\n inner_data = self.get_series_data(datapoints, inner_sid)\n q_data = [p for p in inner_data\n if not p['noq']]\n noq_data = [p for p in inner_data\n if p['noq']]\n \n # For 'all' and 'updates', normal behavior on the q or\n # noq points.\n if kind == 'all':\n return self.project_and_average_data(q_data, average=average)\n elif kind == 'updates':\n return self.project_and_average_data(noq_data, average=average)\n elif kind == 'queries':\n q_avg = self.project_and_average_data(q_data, average=True)\n noq_avg = self.project_and_average_data(noq_data, average=True)\n \n data = []\n for ((ax, ay, _alo, _ahi), (ux, uy, _ulo, _uhi)) in \\\n zip(q_avg, noq_avg):\n assert ax == ux\n # Use 0 for errorbar data.\n data.append((ax, ay - uy, 0, 0))\n return data\n else:\n assert()\n\n\nclass CoreRBACWorkflow(ExpWorkflow):\n \n ExpDatagen = CoreRBACDatagen\n ExpExtractor = CoreRBACExtractor\n ExpDriver = CoreRBACDriver\n ExpVerifyDriver = CoreRBACVerifyDriver\n \n require_ac = False ###\n\n\nclass CoreRoles(CoreRBACWorkflow):\n \n prefix = 'results/corerbac'\n \n class ExpDatagen(CoreRBACWorkflow.ExpDatagen):\n \n progs = [\n 'coreRBAC_in',\n 'coreRBAC_checkaccess_inc',\n 'coreRBAC_checkaccess_dem',\n ]\n \n noqs = [\n False,\n True,\n ]\n \n def get_dsparams_list(self):\n return [\n dict(\n dsid = str(x),\n x = x,\n \n n_users = 10,\n n_roles = x,\n n_ops = 20,\n n_objs = 20,\n \n rpu = 10,\n ppr = 10,\n rps = 10,\n \n q_objs = 20,\n n_pat = 1000,\n s_pat = 1,\n qs_pat = 1,\n q_pat = 1000,\n )\n for x in range(10, 101, 10)\n ]\n \n stddev_window = .1\n min_repeats = 10\n max_repeats = 50\n \n class ExpExtractor(CoreRBACWorkflow.ExpExtractor, ScaledExtractor):\n \n series = [\n (('coreRBAC_in', 'queries'),\n 'original query',\n 'red', '1-2 s poly1'),\n (('coreRBAC_in', 'updates'),\n 'original create/delete session',\n 'red', '1-4 _s poly1'),\n (('coreRBAC_checkaccess_inc', 'queries'),\n 'incremental query',\n 'blue', '1-2 o poly1'),\n (('coreRBAC_checkaccess_inc', 
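# (editor's note, hedged) entries read ((prog, kind), legend label,\n            # color, plot-style spec), matching get_series_points' sid argument.\n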
'updates'),\n 'incremental create/delete session',\n 'blue', '1-4 _o poly1'),\n (('coreRBAC_checkaccess_dem', 'queries'),\n 'filtered query',\n 'green', '1-2 ^ poly1'),\n (('coreRBAC_checkaccess_dem', 'updates'),\n 'filtered create/delete session',\n 'green', '1-4 _^ poly1'),\n ]\n \n multipliers = {('coreRBAC_in', 'queries'): .2}\n \n title = None\n ylabel = 'Running time (in seconds)'\n xlabel = 'Number of roles'\n metric = 'time_cpu'\n \n # Adjust geometry for external legend.\n width = 6\n figsize = (width, 2.625)\n tightlayout_bbox = (0, 0, 3.5/width, 1)\n legend_bbox = (1, 0, 1, 1)\n legend_loc = 'center left'\n \n xmin = 5\n xmax = 105\n \n y_ticklocs = [0, 1, 2, 3, 4]\n\n\nclass CoreDemand(CoreRBACWorkflow):\n \n prefix = 'results/corerbac_demand'\n \n class ExpDatagen(CoreRBACWorkflow.ExpDatagen):\n \n progs = [\n 'coreRBAC_checkaccess_inc',\n 'coreRBAC_checkaccess_dem',\n ]\n \n def get_dsparams_list(self):\n return [\n dict(\n dsid = str(x),\n x = x,\n \n n_users = 10,\n n_roles = 100,\n n_ops = 20,\n n_objs = 1000,\n \n rpu = 10,\n ppr = 100,\n rps = 10,\n \n q_objs = x,\n n_pat = 1000,\n s_pat = 1,\n qs_pat = 1,\n q_pat = 1000,\n )\n for x in [1] + list(range(50, 1000 + 1, 50))\n ]\n \n stddev_window = .1\n min_repeats = 10\n max_repeats = 50\n \n class ExpExtractor(CoreRBACWorkflow.ExpExtractor):\n \n series = [\n (('coreRBAC_checkaccess_inc', 'all'),\n 'incremental',\n 'blue', '- o poly1'),\n (('coreRBAC_checkaccess_dem', 'all'),\n 'filtered',\n 'green', '- ^ poly1'),\n ]\n \n title = None\n ylabel = 'Running time (in seconds)'\n xlabel = 'Number of queried objects'\n metric = 'time_cpu'\n \n ymin = 0\n xmin = -50\n xmax = 1050\n ymax = 5\n\nclass CoreDemandNorm(CoreDemand):\n \n prefix = 'results/corerbac_demand'\n \n class ExpExtractor(NormalizedExtractor, CoreDemand.ExpExtractor):\n \n series = [\n (('coreRBAC_checkaccess_inc', 'all'),\n 'incremental',\n 'blue', '- o normal'),\n (('coreRBAC_checkaccess_dem', 'all'),\n 'filtered',\n 'green', '- ^ poly1'),\n ]\n \n base_sid_map = {\n ('coreRBAC_checkaccess_dem', 'all'):\n ('coreRBAC_checkaccess_inc', 'all'),\n }\n \n def normalize(self, pre_y, base_y):\n return pre_y / base_y\n \n ylabel = 'Running time (normalized)'\n ymax = None\n" }, { "alpha_fraction": 0.47313129901885986, "alphanum_fraction": 0.5535353422164917, "avg_line_length": 38.935482025146484, "blob_id": "38abb0418ab673564556d9b2333bb2c0826aeede", "content_id": "baa4769c827e081d47fa0c8c9d13dd2f6ad739d8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2475, "license_type": "no_license", "max_line_length": 85, "num_lines": 62, "path": "/incoq/tests/programs/aggr/params_out.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from incoq.runtime import *\n# Aggr1 := sum(setmatch(R, 'bu', k), None)\n_m_Aggr1_out = Map()\ndef _maint__m_Aggr1_out_add(_e):\n (v3_1, v3_2) = _e\n if (v3_1 not in _m_Aggr1_out):\n _m_Aggr1_out[v3_1] = set()\n _m_Aggr1_out[v3_1].add(v3_2)\n\ndef _maint__m_Aggr1_out_remove(_e):\n (v4_1, v4_2) = _e\n _m_Aggr1_out[v4_1].remove(v4_2)\n if (len(_m_Aggr1_out[v4_1]) == 0):\n del _m_Aggr1_out[v4_1]\n\ndef _maint_Aggr1_add(_e):\n (v1_v1, v1_v2) = _e\n v1_val = _m_Aggr1_out.singlelookup(v1_v1, (0, 0))\n (v1_state, v1_count) = v1_val\n v1_state = (v1_state + v1_v2)\n v1_val = (v1_state, (v1_count + 1))\n v1_1 = v1_v1\n if (not (len((_m_Aggr1_out[v1_v1] if (v1_v1 in _m_Aggr1_out) else set())) == 0)):\n v1_elem = _m_Aggr1_out.singlelookup(v1_v1)\n # Begin maint _m_Aggr1_out before 
\"Aggr1.remove((v1_1, v1_elem))\"\n _maint__m_Aggr1_out_remove((v1_1, v1_elem))\n # End maint _m_Aggr1_out before \"Aggr1.remove((v1_1, v1_elem))\"\n # Begin maint _m_Aggr1_out after \"Aggr1.add((v1_1, v1_val))\"\n _maint__m_Aggr1_out_add((v1_1, v1_val))\n # End maint _m_Aggr1_out after \"Aggr1.add((v1_1, v1_val))\"\n\ndef _maint_Aggr1_remove(_e):\n (v2_v1, v2_v2) = _e\n v2_val = _m_Aggr1_out.singlelookup(v2_v1)\n if (v2_val[1] == 1):\n v2_1 = v2_v1\n v2_elem = _m_Aggr1_out.singlelookup(v2_v1)\n # Begin maint _m_Aggr1_out before \"Aggr1.remove((v2_1, v2_elem))\"\n _maint__m_Aggr1_out_remove((v2_1, v2_elem))\n # End maint _m_Aggr1_out before \"Aggr1.remove((v2_1, v2_elem))\"\n else:\n (v2_state, v2_count) = v2_val\n v2_state = (v2_state - v2_v2)\n v2_val = (v2_state, (v2_count - 1))\n v2_1 = v2_v1\n v2_elem = _m_Aggr1_out.singlelookup(v2_v1)\n # Begin maint _m_Aggr1_out before \"Aggr1.remove((v2_1, v2_elem))\"\n _maint__m_Aggr1_out_remove((v2_1, v2_elem))\n # End maint _m_Aggr1_out before \"Aggr1.remove((v2_1, v2_elem))\"\n # Begin maint _m_Aggr1_out after \"Aggr1.add((v2_1, v2_val))\"\n _maint__m_Aggr1_out_add((v2_1, v2_val))\n # End maint _m_Aggr1_out after \"Aggr1.add((v2_1, v2_val))\"\n\nfor x in [('A', 1), ('A', 2), ('A', 3), ('B', 4), ('B', 5)]:\n # Begin maint Aggr1 after \"R.add(x)\"\n _maint_Aggr1_add(x)\n # End maint Aggr1 after \"R.add(x)\"\n# Begin maint Aggr1 before \"R.remove(('B', 5))\"\n_maint_Aggr1_remove(('B', 5))\n# End maint Aggr1 before \"R.remove(('B', 5))\"\nk = 'A'\nprint(_m_Aggr1_out.singlelookup(k, (0, 0))[0])" }, { "alpha_fraction": 0.3759894371032715, "alphanum_fraction": 0.4670184552669525, "avg_line_length": 31.978260040283203, "blob_id": "4d0db932cb0e99fab38802eef1eb22bb608d5521", "content_id": "0c28450ced69b341ad6047b9cc30828bf14c7013", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1516, "license_type": "no_license", "max_line_length": 76, "num_lines": 46, "path": "/incoq/tests/programs/auxmap/equality_out.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from incoq.runtime import *\n_m_P_ub2 = Map()\ndef _maint__m_P_ub2_add(_e):\n (v3_1, v3_2, v3_3) = _e\n if ((v3_2 == v3_3)):\n if (v3_2 not in _m_P_ub2):\n _m_P_ub2[v3_2] = set()\n _m_P_ub2[v3_2].add(v3_1)\n\ndef _maint__m_P_ub2_remove(_e):\n (v4_1, v4_2, v4_3) = _e\n if ((v4_2 == v4_3)):\n _m_P_ub2[v4_2].remove(v4_1)\n if (len(_m_P_ub2[v4_2]) == 0):\n del _m_P_ub2[v4_2]\n\n_m_P_uu2 = Map()\ndef _maint__m_P_uu2_add(_e):\n (v1_1, v1_2, v1_3) = _e\n if ((v1_2 == v1_3)):\n if (() not in _m_P_uu2):\n _m_P_uu2[()] = set()\n _m_P_uu2[()].add((v1_1, v1_2))\n\ndef _maint__m_P_uu2_remove(_e):\n (v2_1, v2_2, v2_3) = _e\n if ((v2_2 == v2_3)):\n _m_P_uu2[()].remove((v2_1, v2_2))\n if (len(_m_P_uu2[()]) == 0):\n del _m_P_uu2[()]\n\nfor v in [(1, 2, 2), (2, 2, 2), (3, 3, 3), (4, 1, 2), (5, 2, 3), (9, 9, 9)]:\n # Begin maint _m_P_ub2 after \"P.add(v)\"\n _maint__m_P_ub2_add(v)\n # End maint _m_P_ub2 after \"P.add(v)\"\n # Begin maint _m_P_uu2 after \"P.add(v)\"\n _maint__m_P_uu2_add(v)\n # End maint _m_P_uu2 after \"P.add(v)\"\n# Begin maint _m_P_uu2 before \"P.remove((9, 9, 9))\"\n_maint__m_P_uu2_remove((9, 9, 9))\n# End maint _m_P_uu2 before \"P.remove((9, 9, 9))\"\n# Begin maint _m_P_ub2 before \"P.remove((9, 9, 9))\"\n_maint__m_P_ub2_remove((9, 9, 9))\n# End maint _m_P_ub2 before \"P.remove((9, 9, 9))\"\nprint(sorted((_m_P_uu2[()] if (() in _m_P_uu2) else set())))\nprint(sorted((_m_P_ub2[2] if (2 in _m_P_ub2) else set())))" }, { 
"alpha_fraction": 0.6127272844314575, "alphanum_fraction": 0.6127272844314575, "avg_line_length": 28.464284896850586, "blob_id": "c3cb4c90766893337c93faecf9a3ab50cb2f0e31", "content_id": "f11584c532c8008725f2b40c651e766c370e8903", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1650, "license_type": "no_license", "max_line_length": 70, "num_lines": 56, "path": "/incoq/compiler/incast/nodes.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Node definitions for IncAST, with type information.\"\"\"\n\n\n__all__ = [\n 'TypeAdder',\n \n # Add in nodes_untyped.__all__\n]\n\n\nfrom simplestruct import Field\nfrom iast import NodeTransformer\n\nfrom .nodes_untyped import (__all__ as untyped_all, native_nodes,\n incast_nodes as incast_nodes_untyped)\n\n__all__.extend(untyped_all)\n\n\nincast_nodes = incast_nodes_untyped.copy()\n\n# Programmatically generate new versions of all the expr subclasses\n# so they contain a \"type\" field.\nexpr_node = incast_nodes['expr']\nfor name, node in incast_nodes.items():\n if issubclass(node, expr_node) and node is not expr_node:\n # Copy the old class's namespace.\n namespace = node.__dict__.copy()\n fields = node._fields + ('type',)\n namespace['_fields'] = fields\n namespace['__module__'] = __name__\n namespace['type'] = Field(default=None)\n # The only base class is expr, which itself remains unchanged.\n assert node.__bases__ == (expr_node,)\n new_node = type(name, (expr_node,), namespace)\n incast_nodes[name] = new_node\n\nglobals().update(incast_nodes)\n\n\nclass TypeAdder(NodeTransformer):\n \n \"\"\"Replace untyped expression nodes with typed ones.\n Use None as the type.\n \"\"\"\n \n def node_visit(self, node):\n node = self.generic_visit(node)\n \n if isinstance(node, incast_nodes_untyped['expr']):\n new_nodetype = incast_nodes[node.__class__.__name__]\n fieldvals = [getattr(node, f) for f in node._fields]\n fieldvals += [None]\n node = new_nodetype(*fieldvals)\n \n return node\n" }, { "alpha_fraction": 0.5391777753829956, "alphanum_fraction": 0.541988730430603, "avg_line_length": 28.189743041992188, "blob_id": "7a696841a592414c12b0ae222b5247e5d153e443", "content_id": "01ce2177904aa916d06328b44bc486ae26c05169", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5692, "license_type": "no_license", "max_line_length": 73, "num_lines": 195, "path": "/incoq/transform/schema.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Define views over stats data to produce tables.\"\"\"\n\n\n__all__ = [\n 'BaseSchema',\n 'StatkeySchema',\n 'StandardSchema',\n 'GroupedSchema',\n 'OrigIncFilterSchema',\n 'CostSchema',\n]\n\n\nimport io\nimport csv\n\ntry:\n from tabulate import tabulate\n HAVE_TABULATE = True\nexcept ImportError:\n HAVE_TABULATE = False\n\n\nclass BaseSchema:\n \n \"\"\"View derived from an allstats dictionary.\"\"\"\n \n def __init__(self, stats):\n self.header = self.get_header()\n self.body = self.get_body(stats)\n \n def get_header(self):\n \"\"\"Return the header (list of strings).\"\"\"\n raise NotImplementedError\n \n def get_body(self, stats):\n \"\"\"Generate a table instance from a stats dict. 
Return the body\n (2D list of values.)\n \"\"\"\n raise NotImplementedError\n \n def to_csv(self):\n \"\"\"Return this schema in csv format.\"\"\"\n filelike = io.StringIO(newline='')\n writer = csv.writer(filelike)\n writer.writerow(self.header)\n for line in self.body:\n writer.writerow(line)\n return filelike.getvalue()\n \n def to_ascii(self):\n \"\"\"Return the schema in pretty-printed format if tabulate\n is available, or csv format otherwise.\n \"\"\"\n if HAVE_TABULATE:\n return tabulate(self.body, self.header, floatfmt='.2f')\n else:\n return self.to_csv()\n \n def save_csv(self, name):\n with open(name, 'wt') as file:\n file.write(self.to_csv())\n\n\nclass StatkeySchema(BaseSchema):\n \n \"\"\"Table formed by using selected entry keys as rows and stats\n attributes as columns.\n \"\"\"\n \n rows = []\n \"\"\"List of pairs: (entry name, display name).\"\"\"\n \n cols = []\n \"\"\"List of triples: (stat name, display name, display format).\"\"\"\n \n def get_header(self):\n return ['Program'] + [col_disp for _, col_disp, _ in self.cols]\n \n def get_rowdata(self, allstats, key):\n \"\"\"Hook for modifying how rows are retrieved.\"\"\"\n return allstats.get(key, None)\n \n def get_coldata(self, stats, key):\n return stats.get(key, None)\n \n def get_body(self, allstats):\n body = []\n for row_key, row_disp in self.rows:\n stats = self.get_rowdata(allstats, row_key)\n if stats is not None:\n row = []\n for col_key, col_disp, col_fmt in self.cols:\n col_data = self.get_coldata(stats, col_key)\n if col_data is not None:\n if col_fmt is None:\n col_fmt = ''\n row.append(format(col_data, col_fmt))\n else:\n row.append(None)\n else:\n row_disp = '(!) ' + row_disp\n row = ['!' for _ in range(len(self.cols))]\n \n row = [row_disp] + row\n body.append(row)\n \n return body\n\n\nclass StandardSchema(StatkeySchema):\n \n cols = [\n ('lines', 'LOC', None),\n ('trans time', 'Time', '.2f'),\n ('orig queries', 'in. queries', None),\n ('orig updates', 'in. updates', None),\n ('incr comps', 'Incr. comps', None),\n ('incr aggrs', 'Incr. aggrs', None),\n ('dem structs', 'Dem invs', None),\n ('comps expanded', 'Comps exp.', None),\n ('auxmaps', 'Maps', None),\n ]\n\n\nclass GroupedSchema(StatkeySchema):\n \n \"\"\"Schema that groups data from multiple entries.\"\"\"\n \n rows = []\n \"\"\"List of pairs: (group, display name), where group is\n a list of entries.\n \"\"\"\n \n cols = []\n \"\"\"List of quadruples: (variant identifier, stat name,\n display name, display format). variant identifier is the\n index of the entry we want to examine from the current group.\n \"\"\"\n \n equalities = []\n \"\"\"List of pairs of merged entries that should be equal.\"\"\"\n \n def get_rowdata(self, allstats, key):\n # Merge dictionaries for each entry named by the\n # row into a single dict. The keys get augmented\n # with the corresponding entry id.\n rowdata = {}\n for i, entry in enumerate(key):\n stats = allstats.get(entry, {})\n for attr, value in stats.items():\n rowdata[(i, attr)] = value\n for lhs, rhs in self.equalities:\n if lhs in rowdata and rhs in rowdata:\n assert rowdata[lhs] == rowdata[rhs]\n return rowdata\n\n\nclass OrigIncFilterSchema(GroupedSchema):\n \n # Pull orig queries/updates from dem in case inc is missing.\n cols = [\n ((0, 'lines'), 'Original LOC', None),\n ((2, 'orig queries'), 'Queries', None),\n ((2, 'orig updates'), 'Updates', None),\n ((1, 'lines'), 'Inc. LOC', None),\n ((1, 'trans time'), 'Inc. trans. 
time', '.2f'),\n ((2, 'lines'), 'Filtered LOC', None),\n ((2, 'trans time'), 'Filtered trans. time', '.2f'),\n ]\n \n # Number of queries/updates should agree between inc and dem.\n equalities = [\n ((1, 'orig queries'), (2, 'orig queries')),\n ((1, 'orig updates'), (2, 'orig updates')),\n ]\n\n\nclass CostSchema(StatkeySchema):\n \n \"\"\"Table of costs.\"\"\"\n \n rows = []\n \"\"\"List of pairs: (entry name, display name).\"\"\"\n \n cols = []\n \"\"\"List of triples: (function name, display name, display format).\"\"\"\n \n def get_coldata(self, stats, key):\n from incoq.compiler.cost import PrettyPrinter\n cost = stats.get('costs', {}).get(key, None)\n if cost is None:\n return None\n coststr = PrettyPrinter.run(cost)\n return 'O({})'.format(coststr)\n" }, { "alpha_fraction": 0.4495912790298462, "alphanum_fraction": 0.4959128201007843, "avg_line_length": 20.58823585510254, "blob_id": "9db983d94dc37663015ee20e64483218fb027108", "content_id": "4c7cb09213119c5679c90f7c8aae74278c61abfc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 367, "license_type": "no_license", "max_line_length": 75, "num_lines": 17, "path": "/incoq/tests/programs/comp/setmatchcomp_in.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "# Incrementalized comprehension with setmatch.\n\nfrom incoq.runtime import *\n\nQUERYOPTIONS(\n '{(x, z) for (x, y) in E for (y2, z) in E if y == y2}',\n impl = 'inc',\n)\n\nE = Set()\n\nfor v1, v2 in {(1, 2), (1, 3), (2, 3), (3, 4)}:\n E.add((v1, v2))\n\np = 1\nprint(sorted(setmatch({(x, z) for (x, y) in E for (y2, z) in E if y == y2},\n 'bu', p)))\n" }, { "alpha_fraction": 0.45983755588531494, "alphanum_fraction": 0.47224730253219604, "avg_line_length": 33.348838806152344, "blob_id": "2e22c627a0ce8aa48a397f0ec34801603e0998fb", "content_id": "6f64a45a8b4a3708078c231406db6aa13f850c9e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4432, "license_type": "no_license", "max_line_length": 78, "num_lines": 129, "path": "/incoq/tests/invinc/tup/test_flatten.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Unit tests for flatten.py.\"\"\"\n\n\nimport unittest\n\nimport incoq.compiler.incast as L\nfrom incoq.compiler.cost import UnitCost\n\nfrom incoq.compiler.tup.flatten import *\nfrom incoq.compiler.tup.flatten import (\n tuptree_to_type, tuptype_leaves, make_flattup_code,\n UpdateFlattener, get_clause_vars, ClauseFlattener, ReltypeGetter,\n path_to_elttype)\nfrom incoq.compiler.central import CentralCase\n\n\nclass FlattenCase(CentralCase):\n \n def setUp(self):\n super().setUp()\n self.tuptype = ('<T>', 'a', ('<T>', 'b', 'c'))\n \n def test_tuptree_type(self):\n tree = L.pe('(x, (y, z))')\n tuptype = tuptree_to_type(tree)\n exp_tuptype = ('<T>', 'x', ('<T>', 'y', 'z'))\n self.assertEqual(tuptype, exp_tuptype)\n \n def test_leaves(self):\n leaves = tuptype_leaves(self.tuptype)\n exp_leaves = [(0,), (1, 0), (1, 1)]\n self.assertEqual(leaves, exp_leaves)\n \n def test_flattup_code(self):\n in_node = L.ln('x')\n out_node = L.sn('y')\n code = make_flattup_code(self.tuptype, in_node, out_node, 't')\n exp_code = L.pc('''\n y = (x[0], x[1][0], x[1][1])\n ''')\n self.assertEqual(code, exp_code)\n \n in_node = L.pe('x.a')\n code = make_flattup_code(self.tuptype, in_node, out_node, 't')\n exp_code = L.pc('''\n t = x.a\n y = (t[0], t[1][0], t[1][1])\n ''')\n self.assertEqual(code, exp_code)\n \n def test_updateflattener(self):\n tree = 
L.p('''\n R.add((1, (2, 3)))\n ''')\n tree = UpdateFlattener.run(tree, 'R', self.tuptype,\n self.manager.namegen)\n exp_tree = L.p('''\n _tv1 = (1, (2, 3))\n _ftv1 = (_tv1[0], _tv1[1][0], _tv1[1][1])\n R.add(_ftv1)\n ''')\n self.assertEqual(tree, exp_tree)\n \n def test_getclausevars(self):\n lhs = L.Tuple((L.sn('x'), L.Tuple((L.sn('y'), L.sn('z')), L.Store())),\n L.Store())\n vars = get_clause_vars(L.Enumerator(lhs, L.ln('R')), self.tuptype)\n exp_vars = ['x', 'y', 'z']\n self.assertEqual(vars, exp_vars)\n \n def test_clauseflattener(self):\n code = L.pc('''\n COMP({x for x in S for (x, (y, z)) in R}, [], {})\n ''')\n code = ClauseFlattener.run(code, 'R', self.tuptype)\n exp_code = L.pc('''\n COMP({x for x in S for (x, y, z) in R}, [], {})\n ''')\n self.assertEqual(code, exp_code)\n \n def test_reltypegetter(self):\n tree = L.pc('''\n print(COMP({x for x in S for (x, (y, z)) in R}, [], {}))\n ''')\n tuptype = ReltypeGetter.run(tree, 'R')\n exp_tuptype = ('<T>', 'x', ('<T>', 'y', 'z'))\n self.assertEqual(tuptype, exp_tuptype)\n \n tree = L.pc('''\n print(COMP({x for x in R for (x, (y, z)) in R}, [], {}))\n ''')\n with self.assertRaises(AssertionError):\n ReltypeGetter.run(tree, 'R')\n \n def test_pathtoelttype(self):\n ST, TT, OT = L.SetType, L.TupleType, L.ObjType\n vartype = ST(TT([OT('A'), OT('B'), TT([OT('C'), OT('D')])]))\n \n res = path_to_elttype([0], vartype)\n self.assertEqual(res, OT('A'))\n res = path_to_elttype([1], vartype)\n self.assertEqual(res, OT('B'))\n res = path_to_elttype([2, 0], vartype)\n self.assertEqual(res, OT('C'))\n res = path_to_elttype([2, 1], vartype)\n self.assertEqual(res, OT('D'))\n \n def test_flatten(self):\n ST, TT, OT = L.SetType, L.TupleType, L.ObjType\n self.manager.vartypes = {'R': ST(TT([OT('A'),\n TT([OT('B'), OT('C')])]))}\n code = L.p('''\n R.add((1, (2, 3)))\n print(COMP({x for x in S for (x, (y, z)) in R}, [], {}))\n ''')\n code = flatten_relations(code, ['R'], self.manager)\n exp_code = L.p('''\n _tv1 = (1, (2, 3))\n _ftv1 = (_tv1[0], _tv1[1][0], _tv1[1][1])\n R.add(_ftv1)\n print(COMP({x for x in S for (x, y, z) in R}, [], {}))\n ''')\n exp_vartypes = {'R': ST(TT([OT('A'), OT('B'), OT('C')]))}\n self.assertEqual(code, exp_code)\n self.assertEqual(self.manager.vartypes, exp_vartypes)\n\n\nif __name__ == '__main__':\n unittest.main()\n\n" }, { "alpha_fraction": 0.5627748966217041, "alphanum_fraction": 0.5631898045539856, "avg_line_length": 32.662010192871094, "blob_id": "9d168b1bbed391ed969fbba52aa2812aee1f6459", "content_id": "cf3ff308791077968cc436e95197ac6cf5d0d5e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12051, "license_type": "no_license", "max_line_length": 75, "num_lines": 358, "path": "/incoq/compiler/incast/treeconv.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Initial preprocessing for programs.\"\"\"\n\n\n__all__ = [\n 'add_runtimelib',\n 'remove_runtimelib',\n 'OptionsParser',\n 'parse_options',\n 'infer_params',\n 'attach_qopts_info',\n 'MaintExpander',\n 'export_program',\n]\n\n\nfrom .nodes import *\nfrom .structconv import parse_structast, NodeTransformer\nfrom .error import ProgramError\nfrom .helpers import is_importstar, get_importstar\nfrom .util import ScopeVisitor, VarsFinder, N\nfrom .nodeconv import comp_to_setcomp\nfrom .macros import IncMacroProcessor\n\n\ndef ts(tree):\n # Wrap the import for the package-level ts.\n from . 
import ts\n return ts(tree)\n\n\ndef add_runtimelib(tree):\n \"\"\"Add the runtime import.\"\"\"\n stmt = parse_structast('from incoq.runtime import *', mode='stmt')\n tree = tree._replace(body=(stmt,) + tree.body)\n return tree\n\ndef remove_runtimelib(tree):\n \"\"\"Remove imports of form \"from incoq.runtime import *\".\"\"\"\n class Trans(NodeTransformer):\n def visit_ImportFrom(self, node):\n if is_importstar(node):\n module = get_importstar(node)\n if module == 'incoq.runtime':\n return ()\n \n return Trans.run(tree)\n\n\ndef normalize_qopt(qstr):\n return IncMacroProcessor.run(parse_structast(qstr, mode='expr'))\n\nclass OptionsParser(NodeTransformer):\n \n \"\"\"Gather all options together into an aggregated opts structure,\n comprising a pair of nopts and qopts dictionaries. Return a pair\n of the tree stripped of options nodes, and this opts structure.\n \"\"\"\n \n def __init__(self, ext_opts=None):\n if ext_opts is None:\n ext_opts = ({}, {})\n self.nopts, self.qopts = ext_opts\n \n def process(self, tree):\n tree = super().process(tree)\n return tree, (self.nopts, self.qopts)\n \n def visit_NOptions(self, node):\n overlap = set(self.nopts.keys()).intersection(node.opts.keys())\n if len(overlap) > 0:\n raise ProgramError('Options declarations overlap on keys: ' +\n ', '.join(overlap))\n \n self.nopts.update(node.opts)\n \n return ()\n \n def visit_QOptions(self, node):\n query = normalize_qopt(node.query)\n if query in self.qopts:\n raise ProgramError('Multiple query options declarations for ' +\n ts(query))\n \n self.qopts[query] = node.opts\n \n return ()\n\ndef parse_options(tree, *, ext_opts=None):\n \"\"\"Process and remove options directives from the tree.\n Return a tuple of the tree and the options info.\n \n If ext_opts is given, it is used to augment the options info,\n as if they were specified in the program.\n \"\"\"\n # Normalize external qopts so that the keys are nodes instead\n # of source strings.\n if ext_opts is not None:\n ext_nopts, ext_qopts = ext_opts\n for k, v in dict(ext_qopts).items():\n new_k = normalize_qopt(k)\n ext_qopts[new_k] = v\n del ext_qopts[k]\n ext_opts = (ext_nopts, ext_qopts)\n \n return OptionsParser.run(tree, ext_opts)\n\n\ndef infer_params(tree, *, obj_domain):\n \"\"\"Fill in omitted parameter information for queries by looking\n at what variables are bound in the scope of their occurrence.\n \n If obj_domain is True, the right-hand sides of comprehension\n enumerators are considered for finding uses of parameters.\n \"\"\"\n # We consider a variable in a query to be a parameter if, at\n # the time the query is processed, the variable is in one of\n # the lexical scopes containing the query.\n #\n # This can cause confusion, as these semantics differ from Python.\n # For example, in Python you can define a global variable after\n # the function that uses it, so long as it is defined before\n # the function is called. This analysis makes no such allowance.\n #\n # The wildcard identifier '_' is never considered a parameter.\n #\n # Since ScopeVisitor is not written in a NodeTransformer-compatible\n # way, we take a two-pass approach. First, a map is built from\n # each query to a list of the scope information for each of its\n # occurrences. This scope info is just a set of the bound variables\n # of all lexical scopes containing the occurrence. 
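(One scope set is\n    # recorded per occurrence, in visit order, and consumed in the same\n    # order below.) 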
Then, the\n # occurrences are processed again and assigned their parameter\n # information.\n \n scope_map = {}\n \n class ScopeMapper(ScopeVisitor):\n def visit_Comp(self, node):\n super().visit_Comp(node)\n occurrences = scope_map.setdefault(node, [])\n occurrences.append(self.current_bvars())\n \n ScopeMapper.run(tree)\n \n class ParamFiller(NodeTransformer):\n def visit_Comp(self, node):\n # Do the scope_map lookup before we modify\n # this node recursively.\n bvars = scope_map[node].pop(0)\n node = self.generic_visit(node)\n \n if node.params is None:\n qvars = VarsFinder.run(node, ignore_functions=True,\n ignore_rels=not obj_domain)\n qvars.discard('_')\n pvars = tuple(qvars.intersection(bvars))\n return node._replace(params=pvars)\n \n tree = ParamFiller.run(tree)\n return tree\n\n\ndef attach_qopts_info(tree, opts):\n \"\"\"Attach query options info to queries. Return the new tree\n and a list of unmatched query options strings.\n \n Parameter information is also attached to queries, from the\n 'params' option key.\n \n It is an error if parameter or option information is given\n when it is already specified in the node.\n \n Afterwards, all queries will have their options field set,\n although the params field may still be None.\n \"\"\"\n _nopts, qopts = opts\n unused = set(qopts.keys())\n \n class Trans(NodeTransformer):\n \n # Attach options info before recursing, since we need\n # the old node for lookup into qopts.\n \n def visit_Comp(self, node):\n options = qopts.get(node, None)\n unused.discard(node)\n \n if options is not None:\n if node.options is not None:\n raise ProgramError('Options info already exists '\n 'for query ' + ts(node))\n else:\n options = node.options if node.options is not None else {}\n options = dict(options)\n \n params = options.pop('params', None)\n if params is not None:\n if node.params is not None:\n ProgramError('Parameter info already exists '\n 'for query ' + ts(node))\n params = tuple(params)\n else:\n params = node.params\n \n node = node._replace(params=params, options=options)\n node = self.generic_visit(node)\n return node\n \n def visit_Aggregate(self, node):\n options = qopts.get(node, None)\n unused.discard(node)\n \n if options is not None:\n if node.options is not None:\n raise ProgramError('Options info already exists '\n 'for query ' + ts(node))\n else:\n options = node.options if node.options is not None else {}\n options = dict(options)\n \n node = node._replace(options=options)\n node = self.generic_visit(node)\n return node\n \n tree = Trans.run(tree)\n \n return tree, unused\n\n\nclass MaintExpander(NodeTransformer):\n \n \"\"\"Replace Maintenance nodes with the concatenation of their\n code.\n \"\"\"\n \n def visit_Maintenance(self, node):\n from . 
import ts\n \n node = self.generic_visit(node)\n \n precode = node.precode\n postcode = node.postcode\n \n def wrap(when, code):\n template = '{} maint {} {} \"{}\"'\n begintext = template.format(\n 'Begin', node.name, when, node.desc)\n endtext = template.format(\n 'End', node.name, when, node.desc)\n \n return (Comment(begintext),) + code + (Comment(endtext),)\n \n if len(precode) > 0:\n precode = wrap('before', precode)\n if len(postcode) > 0:\n postcode = wrap('after', postcode)\n \n return precode + node.update + postcode\n\n\nclass TreeExporter(NodeTransformer):\n \n \"\"\"Helper for export_program().\"\"\"\n \n def p(self, source, mode=None, subst=None):\n return parse_structast(source, mode=mode, subst=subst)\n \n def pc(self, source, **kargs):\n return self.p(source, mode='code', **kargs)\n \n def pe(self, source, **kargs):\n return self.p(source, mode='expr', **kargs)\n \n def visit_NOptions(self, node):\n return ()\n \n def visit_QOptions(self, node):\n return ()\n \n def visit_IsEmpty(self, node):\n node = self.generic_visit(node)\n return self.pe('len(TARGET) == 0',\n subst={'TARGET': node.target})\n \n def visit_AssignKey(self, node):\n node = self.generic_visit(node)\n return self.pc('TARGET[KEY] = VALUE',\n subst={'TARGET': node.target,\n 'KEY': node.key,\n 'VALUE': node.value})\n \n def visit_DelKey(self, node):\n node = self.generic_visit(node)\n return self.pc('del TARGET[KEY]',\n subst={'TARGET': node.target,\n 'KEY': node.key})\n \n def visit_Lookup(self, node):\n node = self.generic_visit(node)\n if node.default is not None:\n code = self.pe('TARGET[KEY] if KEY in TARGET else DEFAULT',\n subst={'TARGET': node.target,\n 'KEY': node.key,\n 'DEFAULT': node.default})\n else:\n code = self.pe('TARGET[KEY]',\n subst={'TARGET': node.target,\n 'KEY': node.key})\n return code\n \n def visit_ImgLookup(self, node):\n node = self.generic_visit(node)\n return self.pe('TARGET[KEY] if KEY in TARGET else set()',\n subst={'TARGET': node.target,\n 'KEY': node.key})\n \n def visit_RCImgLookup(self, node):\n node = self.generic_visit(node)\n return self.pe('TARGET[KEY] if KEY in TARGET else RCSet()',\n subst={'TARGET': node.target,\n 'KEY': node.key})\n \n def visit_DemQuery(self, node):\n node = self.generic_visit(node)\n call = Call(func=Name(N.queryfunc(node.demname), Load()),\n args=node.args,\n keywords=[],\n starargs=None,\n kwargs=None)\n if node.value is not None:\n code = BoolOp(op=And(), values=(call, node.value))\n else:\n code = call\n return code\n \n def visit_NoDemQuery(self, node):\n node = self.generic_visit(node)\n return node.value\n \n def visit_Comp(self, node):\n node = self.generic_visit(node)\n return comp_to_setcomp(node)\n \n def visit_Aggregate(self, node):\n node = self.generic_visit(node)\n return self.pe('OP(VALUE)',\n subst={'OP': node.op,\n 'VALUE': node.value})\n\n\ndef export_program(tree):\n \"\"\"Return an AST stripped of information that should not be in\n the output Python code. 
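(IncAST-only constructs are also lowered to plain\n    Python here: a Comp node becomes an ordinary set comprehension, and a\n    DemQuery becomes, roughly, a call to its query_* function.) 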
In particular, options and parameter\n information is removed from queries, and options directives are\n removed.\n \"\"\"\n # Although options directives shouldn't be in the tree at\n # this stage anyway.\n return TreeExporter.run(tree)\n" }, { "alpha_fraction": 0.5047619342803955, "alphanum_fraction": 0.516269862651825, "avg_line_length": 32.599998474121094, "blob_id": "917541617c393adbb557d864d4f417408cffa2b4", "content_id": "a050e2b0359c6082e13d8b7b8782c15b5d034d24", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2520, "license_type": "no_license", "max_line_length": 79, "num_lines": 75, "path": "/experiments/rbac/corerbac/test_rbac_helper.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "###############################################################################\n# test_rbac_helper.py #\n# Author: Jon Brandvein #\n###############################################################################\n\n\"\"\"Unit tests for the rbac_helper module.\"\"\"\n\n\nimport unittest\n\nfrom .rbac_helper import *\n\n\nclass Tester(unittest.TestCase):\n \n def test_opnames(self):\n # Actually, if this fails, the global code in rbac_helper\n # will probably fail first anyway, preventing this unittest\n # from running.\n for n in corerbac_op_names:\n self.assertTrue(n in CoreRBAC.__dict__)\n \n def test_Logger(self):\n rbac = LoggingCoreRBAC()\n rbac.log_AddUser('u1')\n rbac.log_AddRole('r1')\n rbac.AssignUser('u1', 'r1')\n \n exp_log = [('AddUser', 'u1'), ('AddRole', 'r1')]\n \n self.assertEqual(rbac.log, exp_log)\n \n def test_Emitter(self):\n rbac = EmittingCoreRBAC()\n \n self.assertTrue(rbac.emit_AddUser())\n self.assertTrue(rbac.emit_DeleteUser())\n self.assertFalse(rbac.emit_DeleteUser())\n self.assertTrue(rbac.emit_AddUser())\n \n exp_log = [('AddUser', 'u0'), ('DeleteUser', 'u0'), ('AddUser', 'u1')]\n \n self.assertEqual(rbac.log, exp_log)\n \n def test_DemandEmitter(self):\n rbac = DemandEmittingCoreRBAC(1)\n \n # Caution, using non-emit_* variety won't automatically\n # increment the internal fresh-names counters.\n \n rbac.AddUser('u0'),\n rbac.AddRole('r0'),\n rbac.AddOperation('op0'),\n rbac.AddObject('obj0'),\n rbac.GrantPermission('op0', 'obj0', 'r0'),\n rbac.AssignUser('u0', 'r0'),\n rbac.CreateSession('u0', 's0', {'r0'}),\n next(rbac.n_sessions)\n \n # None of these can be done without deleting the single\n # queryable session.\n self.assertFalse(rbac.emit_DeleteUser())\n self.assertFalse(rbac.emit_DeleteRole())\n self.assertFalse(rbac.emit_DeassignUser())\n self.assertFalse(rbac.emit_DeleteSession())\n \n # Ensure CheckAccess uses the queryable session (probabilistic).\n for _ in range(100):\n rbac.emit_CreateSession()\n rbac.emit_CheckAccess()\n self.assertEqual(rbac.log[-1], ('CheckAccess', 's0', 'op0', 'obj0'))\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.4427080750465393, "alphanum_fraction": 0.5189084410667419, "avg_line_length": 41.79191970825195, "blob_id": "25f9f24245ecb1ff3ab48eb3052235539cd38d47", "content_id": "4f653360aec6d448d3bdc0d9b12bedbc59dbbc75", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 21181, "license_type": "no_license", "max_line_length": 192, "num_lines": 495, "path": "/incoq/tests/programs/aggr/nested/obj_out.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from incoq.runtime import *\n# Comp1 := {(o, _e) : o in _U_Comp1, (o, o_f) in _F_f, (o_f, _e) in _M}\n# Aggr1 := 
sum(DEMQUERY(Comp1, [o], setmatch(Comp1, 'bu', o)), None)\n# Comp8 := {(s, _av1) : s in _U_Comp8, (s, o) in _M, _av1 in {Aggr1.smlookup('bu', o, None)}}\n# Comp8_Ts := {s : s in _U_Comp8}\n# Comp8_d_M := {(s, o) : s in Comp8_Ts, (s, o) in _M}\n# Comp8_To := {o : (s, o) in Comp8_d_M}\n# Aggr1_delta := {o : o in Comp8_To}\n_m_Comp8_out = Map()\ndef _maint__m_Comp8_out_add(_e):\n (v40_1, v40_2) = _e\n if (v40_1 not in _m_Comp8_out):\n _m_Comp8_out[v40_1] = set()\n _m_Comp8_out[v40_1].add(v40_2)\n\ndef _maint__m_Comp8_out_remove(_e):\n (v41_1, v41_2) = _e\n _m_Comp8_out[v41_1].remove(v41_2)\n if (len(_m_Comp8_out[v41_1]) == 0):\n del _m_Comp8_out[v41_1]\n\n_m__F_f_in = Map()\ndef _maint__m__F_f_in_add(_e):\n (v38_1, v38_2) = _e\n if (v38_2 not in _m__F_f_in):\n _m__F_f_in[v38_2] = set()\n _m__F_f_in[v38_2].add(v38_1)\n\n_m_Comp1_out = Map()\ndef _maint__m_Comp1_out_add(_e):\n (v36_1, v36_2) = _e\n if (v36_1 not in _m_Comp1_out):\n _m_Comp1_out[v36_1] = set()\n _m_Comp1_out[v36_1].add(v36_2)\n\ndef _maint__m_Comp1_out_remove(_e):\n (v37_1, v37_2) = _e\n _m_Comp1_out[v37_1].remove(v37_2)\n if (len(_m_Comp1_out[v37_1]) == 0):\n del _m_Comp1_out[v37_1]\n\n_m_Comp8_d_M_in = Map()\ndef _maint__m_Comp8_d_M_in_add(_e):\n (v34_1, v34_2) = _e\n if (v34_2 not in _m_Comp8_d_M_in):\n _m_Comp8_d_M_in[v34_2] = set()\n _m_Comp8_d_M_in[v34_2].add(v34_1)\n\ndef _maint__m_Comp8_d_M_in_remove(_e):\n (v35_1, v35_2) = _e\n _m_Comp8_d_M_in[v35_2].remove(v35_1)\n if (len(_m_Comp8_d_M_in[v35_2]) == 0):\n del _m_Comp8_d_M_in[v35_2]\n\n_m_Aggr1_out = Map()\ndef _maint__m_Aggr1_out_add(_e):\n (v32_1, v32_2) = _e\n if (v32_1 not in _m_Aggr1_out):\n _m_Aggr1_out[v32_1] = set()\n _m_Aggr1_out[v32_1].add(v32_2)\n\ndef _maint__m_Aggr1_out_remove(_e):\n (v33_1, v33_2) = _e\n _m_Aggr1_out[v33_1].remove(v33_2)\n if (len(_m_Aggr1_out[v33_1]) == 0):\n del _m_Aggr1_out[v33_1]\n\nAggr1_delta = RCSet()\ndef _maint_Aggr1_delta_Comp8_To_add(_e):\n # Iterate {v26_o : v26_o in deltamatch(Comp8_To, 'b', _e, 1)}\n v26_o = _e\n Aggr1_delta.add(v26_o)\n\nComp8_To = RCSet()\ndef _maint_Comp8_To_Comp8_d_M_add(_e):\n # Iterate {(v24_s, v24_o) : (v24_s, v24_o) in deltamatch(Comp8_d_M, 'bb', _e, 1)}\n (v24_s, v24_o) = _e\n if (v24_o not in Comp8_To):\n Comp8_To.add(v24_o)\n # Begin maint Aggr1_delta after \"Comp8_To.add(v24_o)\"\n _maint_Aggr1_delta_Comp8_To_add(v24_o)\n # End maint Aggr1_delta after \"Comp8_To.add(v24_o)\"\n else:\n Comp8_To.incref(v24_o)\n\ndef _maint_Comp8_To_Comp8_d_M_remove(_e):\n # Iterate {(v25_s, v25_o) : (v25_s, v25_o) in deltamatch(Comp8_d_M, 'bb', _e, 1)}\n (v25_s, v25_o) = _e\n if (Comp8_To.getref(v25_o) == 1):\n # Begin maint Aggr1_delta before \"Comp8_To.remove(v25_o)\"\n _maint_Aggr1_delta_Comp8_To_add(v25_o)\n # End maint Aggr1_delta before \"Comp8_To.remove(v25_o)\"\n Comp8_To.remove(v25_o)\n else:\n Comp8_To.decref(v25_o)\n\nComp8_d_M = RCSet()\ndef _maint_Comp8_d_M_Comp8_Ts_add(_e):\n # Iterate {(v20_s, v20_o) : v20_s in deltamatch(Comp8_Ts, 'b', _e, 1), (v20_s, v20_o) in _M}\n v20_s = _e\n if isinstance(v20_s, Set):\n for v20_o in v20_s:\n Comp8_d_M.add((v20_s, v20_o))\n # Begin maint _m_Comp8_d_M_in after \"Comp8_d_M.add((v20_s, v20_o))\"\n _maint__m_Comp8_d_M_in_add((v20_s, v20_o))\n # End maint _m_Comp8_d_M_in after \"Comp8_d_M.add((v20_s, v20_o))\"\n # Begin maint Comp8_To after \"Comp8_d_M.add((v20_s, v20_o))\"\n _maint_Comp8_To_Comp8_d_M_add((v20_s, v20_o))\n # End maint Comp8_To after \"Comp8_d_M.add((v20_s, v20_o))\"\n\ndef _maint_Comp8_d_M_Comp8_Ts_remove(_e):\n # Iterate {(v21_s, v21_o) : v21_s in 
deltamatch(Comp8_Ts, 'b', _e, 1), (v21_s, v21_o) in _M}\n v21_s = _e\n if isinstance(v21_s, Set):\n for v21_o in v21_s:\n # Begin maint Comp8_To before \"Comp8_d_M.remove((v21_s, v21_o))\"\n _maint_Comp8_To_Comp8_d_M_remove((v21_s, v21_o))\n # End maint Comp8_To before \"Comp8_d_M.remove((v21_s, v21_o))\"\n # Begin maint _m_Comp8_d_M_in before \"Comp8_d_M.remove((v21_s, v21_o))\"\n _maint__m_Comp8_d_M_in_remove((v21_s, v21_o))\n # End maint _m_Comp8_d_M_in before \"Comp8_d_M.remove((v21_s, v21_o))\"\n Comp8_d_M.remove((v21_s, v21_o))\n\ndef _maint_Comp8_d_M__M_add(_e):\n # Iterate {(v22_s, v22_o) : v22_s in Comp8_Ts, (v22_s, v22_o) in deltamatch(_M, 'bb', _e, 1)}\n (v22_s, v22_o) = _e\n if (v22_s in Comp8_Ts):\n Comp8_d_M.add((v22_s, v22_o))\n # Begin maint _m_Comp8_d_M_in after \"Comp8_d_M.add((v22_s, v22_o))\"\n _maint__m_Comp8_d_M_in_add((v22_s, v22_o))\n # End maint _m_Comp8_d_M_in after \"Comp8_d_M.add((v22_s, v22_o))\"\n # Begin maint Comp8_To after \"Comp8_d_M.add((v22_s, v22_o))\"\n _maint_Comp8_To_Comp8_d_M_add((v22_s, v22_o))\n # End maint Comp8_To after \"Comp8_d_M.add((v22_s, v22_o))\"\n\nComp8_Ts = RCSet()\ndef _maint_Comp8_Ts__U_Comp8_add(_e):\n # Iterate {v18_s : v18_s in deltamatch(_U_Comp8, 'b', _e, 1)}\n v18_s = _e\n Comp8_Ts.add(v18_s)\n # Begin maint Comp8_d_M after \"Comp8_Ts.add(v18_s)\"\n _maint_Comp8_d_M_Comp8_Ts_add(v18_s)\n # End maint Comp8_d_M after \"Comp8_Ts.add(v18_s)\"\n\ndef _maint_Comp8_Ts__U_Comp8_remove(_e):\n # Iterate {v19_s : v19_s in deltamatch(_U_Comp8, 'b', _e, 1)}\n v19_s = _e\n # Begin maint Comp8_d_M before \"Comp8_Ts.remove(v19_s)\"\n _maint_Comp8_d_M_Comp8_Ts_remove(v19_s)\n # End maint Comp8_d_M before \"Comp8_Ts.remove(v19_s)\"\n Comp8_Ts.remove(v19_s)\n\nComp8 = RCSet()\ndef _maint_Comp8__U_Comp8_add(_e):\n # Iterate {(v12_s, v12_o, v12__av1) : v12_s in deltamatch(_U_Comp8, 'b', _e, 1), (v12_s, v12_o) in _M, v12__av1 in {Aggr1.smlookup('bu', v12_o, None)}}\n v12_s = _e\n if isinstance(v12_s, Set):\n for v12_o in v12_s:\n for v12__av1 in (_m_Aggr1_out[v12_o] if (v12_o in _m_Aggr1_out) else set()):\n if ((v12_s, v12__av1) not in Comp8):\n Comp8.add((v12_s, v12__av1))\n # Begin maint _m_Comp8_out after \"Comp8.add((v12_s, v12__av1))\"\n _maint__m_Comp8_out_add((v12_s, v12__av1))\n # End maint _m_Comp8_out after \"Comp8.add((v12_s, v12__av1))\"\n else:\n Comp8.incref((v12_s, v12__av1))\n\ndef _maint_Comp8__U_Comp8_remove(_e):\n # Iterate {(v13_s, v13_o, v13__av1) : v13_s in deltamatch(_U_Comp8, 'b', _e, 1), (v13_s, v13_o) in _M, v13__av1 in {Aggr1.smlookup('bu', v13_o, None)}}\n v13_s = _e\n if isinstance(v13_s, Set):\n for v13_o in v13_s:\n for v13__av1 in (_m_Aggr1_out[v13_o] if (v13_o in _m_Aggr1_out) else set()):\n if (Comp8.getref((v13_s, v13__av1)) == 1):\n # Begin maint _m_Comp8_out before \"Comp8.remove((v13_s, v13__av1))\"\n _maint__m_Comp8_out_remove((v13_s, v13__av1))\n # End maint _m_Comp8_out before \"Comp8.remove((v13_s, v13__av1))\"\n Comp8.remove((v13_s, v13__av1))\n else:\n Comp8.decref((v13_s, v13__av1))\n\ndef _maint_Comp8__M_add(_e):\n # Iterate {(v14_s, v14_o, v14__av1) : v14_s in _U_Comp8, (v14_s, v14_o) in deltamatch(Comp8_d_M, 'bb', _e, 1), (v14_s, v14_o) in Comp8_d_M, v14__av1 in {Aggr1.smlookup('bu', v14_o, None)}}\n (v14_s, v14_o) = _e\n if (v14_s in _U_Comp8):\n if ((v14_s, v14_o) in Comp8_d_M):\n for v14__av1 in (_m_Aggr1_out[v14_o] if (v14_o in _m_Aggr1_out) else set()):\n if ((v14_s, v14__av1) not in Comp8):\n Comp8.add((v14_s, v14__av1))\n # Begin maint _m_Comp8_out after \"Comp8.add((v14_s, v14__av1))\"\n 
_maint__m_Comp8_out_add((v14_s, v14__av1))\n # End maint _m_Comp8_out after \"Comp8.add((v14_s, v14__av1))\"\n else:\n Comp8.incref((v14_s, v14__av1))\n\ndef _maint_Comp8_Aggr1_add(_e):\n # Iterate {(v16_s, v16_o, v16__av1) : v16_s in _U_Comp8, (v16_s, v16_o) in Comp8_d_M, (v16_o, v16__av1) in deltamatch(Aggr1, 'bb', _e, 1)}\n (v16_o, v16__av1) = _e\n for v16_s in (_m_Comp8_d_M_in[v16_o] if (v16_o in _m_Comp8_d_M_in) else set()):\n if (v16_s in _U_Comp8):\n if ((v16_s, v16__av1) not in Comp8):\n Comp8.add((v16_s, v16__av1))\n # Begin maint _m_Comp8_out after \"Comp8.add((v16_s, v16__av1))\"\n _maint__m_Comp8_out_add((v16_s, v16__av1))\n # End maint _m_Comp8_out after \"Comp8.add((v16_s, v16__av1))\"\n else:\n Comp8.incref((v16_s, v16__av1))\n\ndef _maint_Comp8_Aggr1_remove(_e):\n # Iterate {(v17_s, v17_o, v17__av1) : v17_s in _U_Comp8, (v17_s, v17_o) in Comp8_d_M, (v17_o, v17__av1) in deltamatch(Aggr1, 'bb', _e, 1)}\n (v17_o, v17__av1) = _e\n for v17_s in (_m_Comp8_d_M_in[v17_o] if (v17_o in _m_Comp8_d_M_in) else set()):\n if (v17_s in _U_Comp8):\n if (Comp8.getref((v17_s, v17__av1)) == 1):\n # Begin maint _m_Comp8_out before \"Comp8.remove((v17_s, v17__av1))\"\n _maint__m_Comp8_out_remove((v17_s, v17__av1))\n # End maint _m_Comp8_out before \"Comp8.remove((v17_s, v17__av1))\"\n Comp8.remove((v17_s, v17__av1))\n else:\n Comp8.decref((v17_s, v17__av1))\n\n_U_Comp8 = RCSet()\n_UEXT_Comp8 = Set()\ndef demand_Comp8(s):\n \"{(s, _av1) : s in _U_Comp8, (s, o) in _M, _av1 in {Aggr1.smlookup('bu', o, None)}}\"\n if (s not in _U_Comp8):\n _U_Comp8.add(s)\n # Begin maint Comp8_Ts after \"_U_Comp8.add(s)\"\n _maint_Comp8_Ts__U_Comp8_add(s)\n # End maint Comp8_Ts after \"_U_Comp8.add(s)\"\n # Begin maint Comp8 after \"_U_Comp8.add(s)\"\n _maint_Comp8__U_Comp8_add(s)\n # End maint Comp8 after \"_U_Comp8.add(s)\"\n # Begin maint demand_Aggr1 after \"_U_Comp8.add(s)\"\n for v28_o in Aggr1_delta.elements():\n demand_Aggr1(v28_o)\n Aggr1_delta.clear()\n # End maint demand_Aggr1 after \"_U_Comp8.add(s)\"\n else:\n _U_Comp8.incref(s)\n\ndef undemand_Comp8(s):\n \"{(s, _av1) : s in _U_Comp8, (s, o) in _M, _av1 in {Aggr1.smlookup('bu', o, None)}}\"\n if (_U_Comp8.getref(s) == 1):\n # Begin maint Comp8 before \"_U_Comp8.remove(s)\"\n _maint_Comp8__U_Comp8_remove(s)\n # End maint Comp8 before \"_U_Comp8.remove(s)\"\n # Begin maint Comp8_Ts before \"_U_Comp8.remove(s)\"\n _maint_Comp8_Ts__U_Comp8_remove(s)\n # End maint Comp8_Ts before \"_U_Comp8.remove(s)\"\n _U_Comp8.remove(s)\n # Begin maint undemand_Aggr1 after \"_U_Comp8.remove(s)\"\n for v29_o in Aggr1_delta.elements():\n undemand_Aggr1(v29_o)\n Aggr1_delta.clear()\n # End maint undemand_Aggr1 after \"_U_Comp8.remove(s)\"\n else:\n _U_Comp8.decref(s)\n\ndef query_Comp8(s):\n \"{(s, _av1) : s in _U_Comp8, (s, o) in _M, _av1 in {Aggr1.smlookup('bu', o, None)}}\"\n if (s not in _UEXT_Comp8):\n _UEXT_Comp8.add(s)\n demand_Comp8(s)\n return True\n\ndef _maint_Aggr1_add(_e):\n (v8_v1, v8_v2) = _e\n if (v8_v1 in _U_Aggr1):\n v8_val = _m_Aggr1_out.singlelookup(v8_v1)\n v8_val = (v8_val + v8_v2)\n v8_1 = v8_v1\n v8_elem = _m_Aggr1_out.singlelookup(v8_v1)\n # Begin maint Comp8 before \"Aggr1.remove((v8_1, v8_elem))\"\n _maint_Comp8_Aggr1_remove((v8_1, v8_elem))\n # End maint Comp8 before \"Aggr1.remove((v8_1, v8_elem))\"\n # Begin maint _m_Aggr1_out before \"Aggr1.remove((v8_1, v8_elem))\"\n _maint__m_Aggr1_out_remove((v8_1, v8_elem))\n # End maint _m_Aggr1_out before \"Aggr1.remove((v8_1, v8_elem))\"\n # Begin maint _m_Aggr1_out after \"Aggr1.add((v8_1, v8_val))\"\n 
_maint__m_Aggr1_out_add((v8_1, v8_val))\n # End maint _m_Aggr1_out after \"Aggr1.add((v8_1, v8_val))\"\n # Begin maint Comp8 after \"Aggr1.add((v8_1, v8_val))\"\n _maint_Comp8_Aggr1_add((v8_1, v8_val))\n # End maint Comp8 after \"Aggr1.add((v8_1, v8_val))\"\n\ndef _maint_Aggr1_remove(_e):\n (v9_v1, v9_v2) = _e\n if (v9_v1 in _U_Aggr1):\n v9_val = _m_Aggr1_out.singlelookup(v9_v1)\n v9_val = (v9_val - v9_v2)\n v9_1 = v9_v1\n v9_elem = _m_Aggr1_out.singlelookup(v9_v1)\n # Begin maint Comp8 before \"Aggr1.remove((v9_1, v9_elem))\"\n _maint_Comp8_Aggr1_remove((v9_1, v9_elem))\n # End maint Comp8 before \"Aggr1.remove((v9_1, v9_elem))\"\n # Begin maint _m_Aggr1_out before \"Aggr1.remove((v9_1, v9_elem))\"\n _maint__m_Aggr1_out_remove((v9_1, v9_elem))\n # End maint _m_Aggr1_out before \"Aggr1.remove((v9_1, v9_elem))\"\n # Begin maint _m_Aggr1_out after \"Aggr1.add((v9_1, v9_val))\"\n _maint__m_Aggr1_out_add((v9_1, v9_val))\n # End maint _m_Aggr1_out after \"Aggr1.add((v9_1, v9_val))\"\n # Begin maint Comp8 after \"Aggr1.add((v9_1, v9_val))\"\n _maint_Comp8_Aggr1_add((v9_1, v9_val))\n # End maint Comp8 after \"Aggr1.add((v9_1, v9_val))\"\n\n_U_Aggr1 = RCSet()\n_UEXT_Aggr1 = Set()\ndef demand_Aggr1(o):\n \"sum(DEMQUERY(Comp1, [o], setmatch(Comp1, 'bu', o)), None)\"\n if (o not in _U_Aggr1):\n _U_Aggr1.add(o)\n # Begin maint Aggr1 after \"_U_Aggr1.add(o)\"\n v10_val = 0\n for v10_elem in (_m_Comp1_out[o] if (o in _m_Comp1_out) else set()):\n v10_val = (v10_val + v10_elem)\n v10_1 = o\n # Begin maint _m_Aggr1_out after \"Aggr1.add((v10_1, v10_val))\"\n _maint__m_Aggr1_out_add((v10_1, v10_val))\n # End maint _m_Aggr1_out after \"Aggr1.add((v10_1, v10_val))\"\n # Begin maint Comp8 after \"Aggr1.add((v10_1, v10_val))\"\n _maint_Comp8_Aggr1_add((v10_1, v10_val))\n # End maint Comp8 after \"Aggr1.add((v10_1, v10_val))\"\n demand_Comp1(o)\n # End maint Aggr1 after \"_U_Aggr1.add(o)\"\n else:\n _U_Aggr1.incref(o)\n\ndef undemand_Aggr1(o):\n \"sum(DEMQUERY(Comp1, [o], setmatch(Comp1, 'bu', o)), None)\"\n if (_U_Aggr1.getref(o) == 1):\n # Begin maint Aggr1 before \"_U_Aggr1.remove(o)\"\n undemand_Comp1(o)\n v11_1 = o\n v11_elem = _m_Aggr1_out.singlelookup(o)\n # Begin maint Comp8 before \"Aggr1.remove((v11_1, v11_elem))\"\n _maint_Comp8_Aggr1_remove((v11_1, v11_elem))\n # End maint Comp8 before \"Aggr1.remove((v11_1, v11_elem))\"\n # Begin maint _m_Aggr1_out before \"Aggr1.remove((v11_1, v11_elem))\"\n _maint__m_Aggr1_out_remove((v11_1, v11_elem))\n # End maint _m_Aggr1_out before \"Aggr1.remove((v11_1, v11_elem))\"\n # End maint Aggr1 before \"_U_Aggr1.remove(o)\"\n _U_Aggr1.remove(o)\n else:\n _U_Aggr1.decref(o)\n\ndef query_Aggr1(o):\n \"sum(DEMQUERY(Comp1, [o], setmatch(Comp1, 'bu', o)), None)\"\n if (o not in _UEXT_Aggr1):\n _UEXT_Aggr1.add(o)\n demand_Aggr1(o)\n return True\n\nComp1 = RCSet()\ndef _maint_Comp1__U_Comp1_add(_e):\n # Iterate {(v2_o, v2_o_f, v2__e) : v2_o in deltamatch(_U_Comp1, 'b', _e, 1), (v2_o, v2_o_f) in _F_f, (v2_o_f, v2__e) in _M}\n v2_o = _e\n if hasattr(v2_o, 'f'):\n v2_o_f = v2_o.f\n if isinstance(v2_o_f, Set):\n for v2__e in v2_o_f:\n if ((v2_o, v2__e) not in Comp1):\n Comp1.add((v2_o, v2__e))\n # Begin maint _m_Comp1_out after \"Comp1.add((v2_o, v2__e))\"\n _maint__m_Comp1_out_add((v2_o, v2__e))\n # End maint _m_Comp1_out after \"Comp1.add((v2_o, v2__e))\"\n # Begin maint Aggr1 after \"Comp1.add((v2_o, v2__e))\"\n _maint_Aggr1_add((v2_o, v2__e))\n # End maint Aggr1 after \"Comp1.add((v2_o, v2__e))\"\n else:\n Comp1.incref((v2_o, v2__e))\n\ndef 
_maint_Comp1__U_Comp1_remove(_e):\n # Iterate {(v3_o, v3_o_f, v3__e) : v3_o in deltamatch(_U_Comp1, 'b', _e, 1), (v3_o, v3_o_f) in _F_f, (v3_o_f, v3__e) in _M}\n v3_o = _e\n if hasattr(v3_o, 'f'):\n v3_o_f = v3_o.f\n if isinstance(v3_o_f, Set):\n for v3__e in v3_o_f:\n if (Comp1.getref((v3_o, v3__e)) == 1):\n # Begin maint Aggr1 before \"Comp1.remove((v3_o, v3__e))\"\n _maint_Aggr1_remove((v3_o, v3__e))\n # End maint Aggr1 before \"Comp1.remove((v3_o, v3__e))\"\n # Begin maint _m_Comp1_out before \"Comp1.remove((v3_o, v3__e))\"\n _maint__m_Comp1_out_remove((v3_o, v3__e))\n # End maint _m_Comp1_out before \"Comp1.remove((v3_o, v3__e))\"\n Comp1.remove((v3_o, v3__e))\n else:\n Comp1.decref((v3_o, v3__e))\n\ndef _maint_Comp1__F_f_add(_e):\n # Iterate {(v4_o, v4_o_f, v4__e) : v4_o in _U_Comp1, (v4_o, v4_o_f) in deltamatch(_F_f, 'bb', _e, 1), (v4_o_f, v4__e) in _M}\n (v4_o, v4_o_f) = _e\n if (v4_o in _U_Comp1):\n if isinstance(v4_o_f, Set):\n for v4__e in v4_o_f:\n if ((v4_o, v4__e) not in Comp1):\n Comp1.add((v4_o, v4__e))\n # Begin maint _m_Comp1_out after \"Comp1.add((v4_o, v4__e))\"\n _maint__m_Comp1_out_add((v4_o, v4__e))\n # End maint _m_Comp1_out after \"Comp1.add((v4_o, v4__e))\"\n # Begin maint Aggr1 after \"Comp1.add((v4_o, v4__e))\"\n _maint_Aggr1_add((v4_o, v4__e))\n # End maint Aggr1 after \"Comp1.add((v4_o, v4__e))\"\n else:\n Comp1.incref((v4_o, v4__e))\n\ndef _maint_Comp1__M_add(_e):\n # Iterate {(v6_o, v6_o_f, v6__e) : v6_o in _U_Comp1, (v6_o, v6_o_f) in _F_f, (v6_o_f, v6__e) in deltamatch(_M, 'bb', _e, 1)}\n (v6_o_f, v6__e) = _e\n for v6_o in (_m__F_f_in[v6_o_f] if (v6_o_f in _m__F_f_in) else set()):\n if (v6_o in _U_Comp1):\n if ((v6_o, v6__e) not in Comp1):\n Comp1.add((v6_o, v6__e))\n # Begin maint _m_Comp1_out after \"Comp1.add((v6_o, v6__e))\"\n _maint__m_Comp1_out_add((v6_o, v6__e))\n # End maint _m_Comp1_out after \"Comp1.add((v6_o, v6__e))\"\n # Begin maint Aggr1 after \"Comp1.add((v6_o, v6__e))\"\n _maint_Aggr1_add((v6_o, v6__e))\n # End maint Aggr1 after \"Comp1.add((v6_o, v6__e))\"\n else:\n Comp1.incref((v6_o, v6__e))\n\n_U_Comp1 = RCSet()\n_UEXT_Comp1 = Set()\ndef demand_Comp1(o):\n '{(o, _e) : o in _U_Comp1, (o, o_f) in _F_f, (o_f, _e) in _M}'\n if (o not in _U_Comp1):\n _U_Comp1.add(o)\n # Begin maint Comp1 after \"_U_Comp1.add(o)\"\n _maint_Comp1__U_Comp1_add(o)\n # End maint Comp1 after \"_U_Comp1.add(o)\"\n else:\n _U_Comp1.incref(o)\n\ndef undemand_Comp1(o):\n '{(o, _e) : o in _U_Comp1, (o, o_f) in _F_f, (o_f, _e) in _M}'\n if (_U_Comp1.getref(o) == 1):\n # Begin maint Comp1 before \"_U_Comp1.remove(o)\"\n _maint_Comp1__U_Comp1_remove(o)\n # End maint Comp1 before \"_U_Comp1.remove(o)\"\n _U_Comp1.remove(o)\n else:\n _U_Comp1.decref(o)\n\ndef query_Comp1(o):\n '{(o, _e) : o in _U_Comp1, (o, o_f) in _F_f, (o_f, _e) in _M}'\n if (o not in _UEXT_Comp1):\n _UEXT_Comp1.add(o)\n demand_Comp1(o)\n return True\n\ns = Set()\nfor x in [1, 2, 3]:\n o = Obj()\n o.f = Set()\n # Begin maint _m__F_f_in after \"_F_f.add((o, Set()))\"\n _maint__m__F_f_in_add((o, Set()))\n # End maint _m__F_f_in after \"_F_f.add((o, Set()))\"\n # Begin maint Comp1 after \"_F_f.add((o, Set()))\"\n _maint_Comp1__F_f_add((o, Set()))\n # End maint Comp1 after \"_F_f.add((o, Set()))\"\n for y in [10, 20, 30]:\n v1 = o.f\n v1.add((x * y))\n # Begin maint Comp8_d_M after \"_M.add((v1, (x * y)))\"\n _maint_Comp8_d_M__M_add((v1, (x * y)))\n # End maint Comp8_d_M after \"_M.add((v1, (x * y)))\"\n # Begin maint Comp8 after \"_M.add((v1, (x * y)))\"\n _maint_Comp8__M_add((v1, (x * y)))\n # End maint 
Comp8 after \"_M.add((v1, (x * y)))\"\n # Begin maint Comp1 after \"_M.add((v1, (x * y)))\"\n _maint_Comp1__M_add((v1, (x * y)))\n # End maint Comp1 after \"_M.add((v1, (x * y)))\"\n # Begin maint demand_Aggr1 after \"_M.add((v1, (x * y)))\"\n for v30_o in Aggr1_delta.elements():\n demand_Aggr1(v30_o)\n Aggr1_delta.clear()\n # End maint demand_Aggr1 after \"_M.add((v1, (x * y)))\"\n s.add(o)\n # Begin maint Comp8_d_M after \"_M.add((s, o))\"\n _maint_Comp8_d_M__M_add((s, o))\n # End maint Comp8_d_M after \"_M.add((s, o))\"\n # Begin maint Comp8 after \"_M.add((s, o))\"\n _maint_Comp8__M_add((s, o))\n # End maint Comp8 after \"_M.add((s, o))\"\n # Begin maint Comp1 after \"_M.add((s, o))\"\n _maint_Comp1__M_add((s, o))\n # End maint Comp1 after \"_M.add((s, o))\"\n # Begin maint demand_Aggr1 after \"_M.add((s, o))\"\n for v31_o in Aggr1_delta.elements():\n demand_Aggr1(v31_o)\n Aggr1_delta.clear()\n # End maint demand_Aggr1 after \"_M.add((s, o))\"\nprint(sorted((query_Comp8(s) and (_m_Comp8_out[s] if (s in _m_Comp8_out) else set()))))" }, { "alpha_fraction": 0.6631205677986145, "alphanum_fraction": 0.6631205677986145, "avg_line_length": 22.020408630371094, "blob_id": "6ac3fc2414a2a3708ccff72c5a4433f3339df837", "content_id": "2cb3de6c5bec8bba9ad64ac3a7a9c6a93f2b8993", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1128, "license_type": "no_license", "max_line_length": 79, "num_lines": 49, "path": "/experiments/django/django_simp_in.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "# Simplified version that gets rid of the users parameter\n# in favor of passing in the actual user object. This version\n# is also closer to the actual query appearing in the Django\n# source code.\n\n# Changed from django_in.py:\n# - the query is different, and takes in a user instead of a user id\n# - the users set is deleted, and make_user() modified accordingly\n\nfrom incoq.runtime import *\n\nOPTIONS(\n obj_domain = True,\n)\n\nQUERYOPTIONS(\n '{p.name for g in user.groups for p in g.perms if g.active}',\n uset_mode = 'explicit',\n uset_params = ['user'],\n)\n\ndef make_user(id):\n user = Obj()\n user.id = id\n user.groups = Set()\n return user\n\ndef make_group(active):\n group = Obj()\n group.active = active\n group.perms = Set()\n return group\n\ndef make_perm(name):\n perm = Obj()\n perm.name = name\n return perm\n\ndef add_group(u, g):\n u.groups.add(g)\n\ndef add_perm(g, p):\n g.perms.add(p)\n\ndef do_query(user):\n return {p.name for g in user.groups for p in g.perms if g.active}\n\ndef do_query_nodemand(user):\n return NODEMAND({p.name for g in user.groups for p in g.perms if g.active})\n" }, { "alpha_fraction": 0.5051546096801758, "alphanum_fraction": 0.5283505320549011, "avg_line_length": 15.8695650100708, "blob_id": "e1ac10fe7377b5588b3c01c8be34dbfe7a5a83ba", "content_id": "03ac2157d8df6277f95cab77db233db096b0c2e6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 388, "license_type": "no_license", "max_line_length": 52, "num_lines": 23, "path": "/incoq/tests/programs/aggr/nested/obj_in.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "# Aggregate of a field retrieval in a comprehension.\n\nfrom incoq.runtime import *\n\nOPTIONS(\n obj_domain = True,\n default_impl = 'inc',\n)\nQUERYOPTIONS(\n '{sum(o.f) for o in s}',\n params = ['s'],\n impl = 'dem',\n)\n\ns = Set()\nfor x in [1, 2, 3]:\n o = Obj()\n o.f = Set()\n for y in [10, 20, 30]:\n 
o.f.add(x * y)\n s.add(o)\n\nprint(sorted({sum(o.f) for o in s}))\n" }, { "alpha_fraction": 0.5111111402511597, "alphanum_fraction": 0.5456790328025818, "avg_line_length": 13.464285850524902, "blob_id": "d24f6d33e38f2c32a78dd93a245d066938725434", "content_id": "d062905eaae32baa9a7266c4288861a54ec6d6e6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 405, "license_type": "no_license", "max_line_length": 43, "num_lines": 28, "path": "/incoq/tests/programs/aggr/nested/basic_in.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "# Basic incrementalized comprehensions.\n\nfrom incoq.runtime import *\n\nOPTIONS(\n default_impl = 'inc',\n)\n\nQUERYOPTIONS(\n '{x for x in S if x > sum(R)}',\n impl = 'dem',\n)\n\nS = Set()\nR = Set()\n\nfor e in [1, 5, 15, 20]:\n S.add(e)\n\nfor e in [1, 2, 3, 4]:\n R.add(e)\n\nprint(sorted({x for x in S if x > sum(R)}))\n\nfor e in [1, 2, 3, 4]:\n R.remove(e)\n\nprint(sorted({x for x in S if x > sum(R)}))\n" }, { "alpha_fraction": 0.37733888626098633, "alphanum_fraction": 0.40800416469573975, "avg_line_length": 27.294116973876953, "blob_id": "8635504724803bd7b815c161dc1ad3c2c1cc4021", "content_id": "79ff9f14644a9b42582949e869279c34a305821d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1924, "license_type": "no_license", "max_line_length": 79, "num_lines": 68, "path": "/incoq/tests/util/test_seq.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "###############################################################################\n# test_seq.py #\n# Author: Jon Brandvein #\n###############################################################################\n\n\"\"\"Unit tests for the seq module.\"\"\"\n\n\nimport unittest\n\nfrom incoq.util.seq import *\n\n\nclass TestSeq(unittest.TestCase):\n \n def test_zip_strict(self):\n zip_strict([1, 2], [3, 4])\n \n with self.assertRaises(AssertionError):\n zip_strict([1, 2], [3, 4, 5])\n \n def test_no_duplicates(self):\n self.assertTrue(no_duplicates([1, 2, 3]))\n self.assertFalse(no_duplicates([1, 2, 2]))\n \n def test_get_duplicates(self):\n result = get_duplicates([1, 2, 3, 4, 2, 4, 5, 4])\n expected = [2, 4]\n self.assertEqual(result, expected)\n \n def test_elim_duplicates(self):\n result = elim_duplicates([1, 2, 3, 4, 2, 4, 5, 4])\n expected = [1, 2, 3, 4, 5]\n self.assertEqual(result, expected)\n \n def test_map_tuple(self):\n f = lambda x: 2 * x\n \n inputs = [\n 4,\n (1, 2),\n [1, 2]\n ]\n \n expecteds = [\n 8,\n (2, 4),\n [2, 4]\n ]\n \n for i in range(len(inputs)):\n result = map_tuple(f, inputs[i])\n self.assertEqual(result, expecteds[i],\n msg='(In case ' + str(i) + ')')\n \n def test_map_tuple_rec(self):\n f = lambda x: 2 * x if isinstance(x, int) else x\n \n input = (1, (2, 3))\n norec_output = (2, (2, 3))\n rec_output = (2, (4, 6))\n \n self.assertEqual(map_tuple(f, input), norec_output)\n self.assertEqual(map_tuple_rec(f, input), rec_output)\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.4063451886177063, "alphanum_fraction": 0.46776649355888367, "avg_line_length": 39.21428680419922, "blob_id": "d934b71973af20e791cb6941a8c4d0468a6b53d5", "content_id": "94bdad4c47d7860d85529045e18d0f4f1096ca98", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3940, "license_type": "no_license", "max_line_length": 132, "num_lines": 98, "path": "/incoq/tests/programs/objcomp/if_out.py", 
"repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from incoq.runtime import *\n# Comp1 := {(s, t, o_i) : (s, o) in _M, (t, o) in _M, (o, o_i) in _F_i}\n_m_Comp1_bbu = Map()\ndef _maint__m_Comp1_bbu_add(_e):\n (v7_1, v7_2, v7_3) = _e\n if ((v7_1, v7_2) not in _m_Comp1_bbu):\n _m_Comp1_bbu[(v7_1, v7_2)] = set()\n _m_Comp1_bbu[(v7_1, v7_2)].add(v7_3)\n\ndef _maint__m_Comp1_bbu_remove(_e):\n (v8_1, v8_2, v8_3) = _e\n _m_Comp1_bbu[(v8_1, v8_2)].remove(v8_3)\n if (len(_m_Comp1_bbu[(v8_1, v8_2)]) == 0):\n del _m_Comp1_bbu[(v8_1, v8_2)]\n\n_m__M_in = Map()\ndef _maint__m__M_in_add(_e):\n (v5_1, v5_2) = _e\n if (v5_2 not in _m__M_in):\n _m__M_in[v5_2] = set()\n _m__M_in[v5_2].add(v5_1)\n\nComp1 = RCSet()\ndef _maint_Comp1__M_add(_e):\n v1_DAS = set()\n # Iterate {(v1_s, v1_o, v1_t, v1_o_i) : (v1_s, v1_o) in deltamatch(_M, 'bb', _e, 1), (v1_t, v1_o) in _M, (v1_o, v1_o_i) in _F_i}\n (v1_s, v1_o) = _e\n if hasattr(v1_o, 'i'):\n v1_o_i = v1_o.i\n for v1_t in (_m__M_in[v1_o] if (v1_o in _m__M_in) else set()):\n if ((v1_s, v1_o, v1_t, v1_o_i) not in v1_DAS):\n v1_DAS.add((v1_s, v1_o, v1_t, v1_o_i))\n # Iterate {(v1_s, v1_o, v1_t, v1_o_i) : (v1_s, v1_o) in _M, (v1_t, v1_o) in deltamatch(_M, 'bb', _e, 1), (v1_o, v1_o_i) in _F_i}\n (v1_t, v1_o) = _e\n if hasattr(v1_o, 'i'):\n v1_o_i = v1_o.i\n for v1_s in (_m__M_in[v1_o] if (v1_o in _m__M_in) else set()):\n if ((v1_s, v1_o, v1_t, v1_o_i) not in v1_DAS):\n v1_DAS.add((v1_s, v1_o, v1_t, v1_o_i))\n for (v1_s, v1_o, v1_t, v1_o_i) in v1_DAS:\n if ((v1_s, v1_t, v1_o_i) not in Comp1):\n Comp1.add((v1_s, v1_t, v1_o_i))\n # Begin maint _m_Comp1_bbu after \"Comp1.add((v1_s, v1_t, v1_o_i))\"\n _maint__m_Comp1_bbu_add((v1_s, v1_t, v1_o_i))\n # End maint _m_Comp1_bbu after \"Comp1.add((v1_s, v1_t, v1_o_i))\"\n else:\n Comp1.incref((v1_s, v1_t, v1_o_i))\n del v1_DAS\n\ndef _maint_Comp1__F_i_add(_e):\n # Iterate {(v3_s, v3_o, v3_t, v3_o_i) : (v3_s, v3_o) in _M, (v3_t, v3_o) in _M, (v3_o, v3_o_i) in deltamatch(_F_i, 'bb', _e, 1)}\n (v3_o, v3_o_i) = _e\n for v3_s in (_m__M_in[v3_o] if (v3_o in _m__M_in) else set()):\n for v3_t in (_m__M_in[v3_o] if (v3_o in _m__M_in) else set()):\n if ((v3_s, v3_t, v3_o_i) not in Comp1):\n Comp1.add((v3_s, v3_t, v3_o_i))\n # Begin maint _m_Comp1_bbu after \"Comp1.add((v3_s, v3_t, v3_o_i))\"\n _maint__m_Comp1_bbu_add((v3_s, v3_t, v3_o_i))\n # End maint _m_Comp1_bbu after \"Comp1.add((v3_s, v3_t, v3_o_i))\"\n else:\n Comp1.incref((v3_s, v3_t, v3_o_i))\n\ns1 = Set()\ns2 = Set()\nt = Set()\nfor i in {1, 2, 3, 4, 5}:\n o = Obj()\n o.i = i\n # Begin maint Comp1 after \"_F_i.add((o, i))\"\n _maint_Comp1__F_i_add((o, i))\n # End maint Comp1 after \"_F_i.add((o, i))\"\n if (i % 2):\n s1.add(o)\n # Begin maint _m__M_in after \"_M.add((s1, o))\"\n _maint__m__M_in_add((s1, o))\n # End maint _m__M_in after \"_M.add((s1, o))\"\n # Begin maint Comp1 after \"_M.add((s1, o))\"\n _maint_Comp1__M_add((s1, o))\n # End maint Comp1 after \"_M.add((s1, o))\"\n else:\n s2.add(o)\n # Begin maint _m__M_in after \"_M.add((s2, o))\"\n _maint__m__M_in_add((s2, o))\n # End maint _m__M_in after \"_M.add((s2, o))\"\n # Begin maint Comp1 after \"_M.add((s2, o))\"\n _maint_Comp1__M_add((s2, o))\n # End maint Comp1 after \"_M.add((s2, o))\"\n t.add(o)\n # Begin maint _m__M_in after \"_M.add((t, o))\"\n _maint__m__M_in_add((t, o))\n # End maint _m__M_in after \"_M.add((t, o))\"\n # Begin maint Comp1 after \"_M.add((t, o))\"\n _maint_Comp1__M_add((t, o))\n # End maint Comp1 after \"_M.add((t, o))\"\ns = s1\nprint(sorted((_m_Comp1_bbu[(s, t)] if ((s, t) in 
_m_Comp1_bbu) else set())))\ns = s2\nprint(sorted((_m_Comp1_bbu[(s, t)] if ((s, t) in _m_Comp1_bbu) else set())))" }, { "alpha_fraction": 0.5937669277191162, "alphanum_fraction": 0.5951219797134399, "avg_line_length": 33.48598098754883, "blob_id": "00afd2ab28b6989fffbab9a9912f7ca840881f90", "content_id": "67dc217482cf7d84dc7790c8492f2243e36fa828", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3690, "license_type": "no_license", "max_line_length": 79, "num_lines": 107, "path": "/experiments/jql/java_bridge.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "###############################################################################\n# java_bridge.py #\n# Author: Jon Brandvein #\n###############################################################################\n\n\"\"\"Bridge between frexp testing framework and a Java program for\nrunning the JQL queries within the actual JQL system.\n\nNOTE: This file is highly dependent on my system-specific paths,\nand on my installations of Cygwin, Java, and JQL.\n\nWe use a custom test runner. The standard test runner uses the\nmultiprocessing library to spawn a Python process, and sends the\ndataset over in pickled form. Here we use the subprocess library\nto spawn Java (actually, the JQL wrapper script), and send the\ndataset over in JSON form. Result data comes back as JSON too.\n\"\"\"\n\n\nimport os\nfrom os.path import join\nimport subprocess\nimport json\nfrom types import SimpleNamespace\nimport configparser\n\n\nclass JavaError(subprocess.CalledProcessError):\n \n def __str__(self):\n return ('Command {} returned non-zero exit status {}\\n'\n 'stderr output:\\n{}'.format(\n self.cmd, self.returncode, self.output))\n\n# CLASSPATH is windows-style, but regular PATH is unix-style since\n# it uses cygwin.\n\ndef get_config():\n \"\"\"Read config.txt to determine appropriate environment variables\n and paths.\n \"\"\"\n config = configparser.ConfigParser()\n dirname = os.path.dirname(__file__)\n config.read(join(dirname, '../config.txt'))\n jqlconf = config['jql']\n \n ns = SimpleNamespace()\n \n ns.bash_cmd = jqlconf['BASH_CMD']\n ns.jql_home = jqlconf['JQL_HOME']\n ns.java_home = jqlconf['JAVA_HOME']\n ns.aspectj_home = jqlconf['ASPECTJ_HOME']\n ns.jsonsimple_jarpath = jqlconf['JSONSIMPLE_JARPATH']\n ns.aspectj_jarpath = jqlconf['ASPECTJ_JARPATH']\n \n ns.working_dir = join(dirname, 'java')\n ns.jql_cmd = join(ns.jql_home, 'bin/jql')\n ns.path = join(ns.java_home, 'bin') + ':/bin'\n ns.cmd = [ns.bash_cmd, ns.jql_cmd, '-notracker', '-caching']\n ns.classpath = ';'.join([ns.jsonsimple_jarpath,\n ns.aspectj_jarpath, ns.working_dir])\n \n return ns\n\ndef spawn_java(config, level, cache, verify, dataset):\n \"\"\"Spawn the Java process, pass in a dataset object, and return\n a results object.\n \n - level (\"1\" | \"2\" | \"3\") controls which query is run\n - cache (boolean) controls whether JQL incrementalization is\n enabled\n - verify (boolean) controls whether we are timing or verifying\n output correctness\n \"\"\"\n PIPE = subprocess.PIPE\n \n env = dict(os.environ.items())\n oldpath = env['PATH']\n newpath = config.path + ':' + oldpath\n env.update({'JQL_HOME': config.jql_home,\n 'ASPECTJ_HOME': config.aspectj_home,\n 'JAVA_HOME': config.java_home,\n 'CLASSPATH': config.classpath,\n 'PATH': newpath})\n \n args = list(config.cmd)\n args.append('jqlexp/Level' + level)\n args.append('cache' if cache else 'nocache')\n args.append('verify' if verify else 'benchmark')\n 
\n child = subprocess.Popen(\n args, bufsize=-1,\n stdin=PIPE, stdout=PIPE,\n cwd=config.working_dir, env=env, universal_newlines=True)\n \n data_in = json.dumps(dataset)\n stdout, stderr = child.communicate(data_in)\n \n # Uncomment to debug:\n# print('stdout:\\n' + stdout)\n# print('stderr:\\n' + stderr)\n \n if child.returncode != 0:\n raise JavaError(child.returncode, config.cmd, stderr)\n \n results = json.loads(stdout)\n return results\n" }, { "alpha_fraction": 0.6269896030426025, "alphanum_fraction": 0.6283736824989319, "avg_line_length": 24.803571701049805, "blob_id": "b3736f938fae50c915aca99f7f89c09c73fa4817", "content_id": "089398da7c987548fa55df4794675bde91ff07c7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1445, "license_type": "no_license", "max_line_length": 77, "num_lines": 56, "path": "/experiments/django/django_in.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "# Authentication query from the OSQ paper, modeled on Django.\n\n# Fields like user id, user.groups, and group.perms are 1-to-1.\n# If U is small, we'd benefit from the join heuristic running\n# U and the inverse of F_id first, since that would get us the\n# few queried users.\n\nfrom incoq.runtime import *\n\nOPTIONS(\n obj_domain = True,\n)\n\nQUERYOPTIONS(\n '''{p.name for u in users for g in u.groups for p in g.perms\n if u.id == uid if g.active}''',\n # The parameters are users and uid. In the OSQ paper,\n # only users is tracked in the U-set because uid is a\n # constrained parameter. Here, since we don't reassign\n # to users, we view it as a relation. Consequently there\n # is no need for a distinct U-set at all.\n)\n\nusers = Set()\n\ndef make_user(id):\n user = Obj()\n user.id = id\n user.groups = Set()\n users.add(user)\n return user\n\ndef make_group(active):\n group = Obj()\n group.active = active\n group.perms = Set()\n return group\n\ndef make_perm(name):\n perm = Obj()\n perm.name = name\n return perm\n\ndef add_group(u, g):\n u.groups.add(g)\n\ndef add_perm(g, p):\n g.perms.add(p)\n\ndef do_query(uid):\n return {p.name for u in users for g in u.groups for p in g.perms\n if u.id == uid if g.active}\n\ndef do_query_nodemand(uid):\n return NODEMAND({p.name for u in users for g in u.groups for p in g.perms\n if u.id == uid if g.active})\n" }, { "alpha_fraction": 0.4761398136615753, "alphanum_fraction": 0.5256838798522949, "avg_line_length": 32.406089782714844, "blob_id": "405ea8a7501abaff2857aa2d48f2e5633328d04b", "content_id": "b3f5329e1f220ec13019c1cb8460a6684639b691", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6580, "license_type": "no_license", "max_line_length": 129, "num_lines": 197, "path": "/incoq/tests/programs/aggr/nested/basic_out.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from incoq.runtime import *\n# Aggr1 := sum(R, None)\n# Comp1 := {x : x in S, _av1 in {Aggr1.smlookup('u', (), None)}, (x > _av1)}\n# Aggr1_delta := {() : _ in S}\n_m_R_u = Map()\ndef _maint__m_R_u_add(_e):\n v16_1 = _e\n if (() not in _m_R_u):\n _m_R_u[()] = set()\n _m_R_u[()].add(v16_1)\n\ndef _maint__m_R_u_remove(_e):\n v17_1 = _e\n _m_R_u[()].remove(v17_1)\n if (len(_m_R_u[()]) == 0):\n del _m_R_u[()]\n\n_m_Aggr1_u = Map()\ndef _maint__m_Aggr1_u_add(_e):\n v14_1 = _e\n if (() not in _m_Aggr1_u):\n _m_Aggr1_u[()] = set()\n _m_Aggr1_u[()].add(v14_1)\n\ndef _maint__m_Aggr1_u_remove(_e):\n v15_1 = _e\n _m_Aggr1_u[()].remove(v15_1)\n if 
(len(_m_Aggr1_u[()]) == 0):\n del _m_Aggr1_u[()]\n\n_m_S_w = Map()\ndef _maint__m_S_w_add(_e):\n if (() not in _m_S_w):\n _m_S_w[()] = RCSet()\n if (() not in _m_S_w[()]):\n _m_S_w[()].add(())\n else:\n _m_S_w[()].incref(())\n\nAggr1_delta = RCSet()\ndef _maint_Aggr1_delta_S_add(_e):\n # Iterate {() : _ in deltamatch(S, 'w', _e, 1)}\n for _ in setmatch(({_e} if ((_m_S_w[()] if (() in _m_S_w) else RCSet()).getref(()) == 1) else {}), 'w', ()):\n Aggr1_delta.add(())\n\nComp1 = RCSet()\ndef _maint_Comp1_S_add(_e):\n # Iterate {(v5_x, v5__av1) : v5_x in deltamatch(S, 'b', _e, 1), v5__av1 in {Aggr1.smlookup('u', (), None)}, (v5_x > v5__av1)}\n v5_x = _e\n for v5__av1 in Aggr1:\n if (v5_x > v5__av1):\n if (v5_x not in Comp1):\n Comp1.add(v5_x)\n else:\n Comp1.incref(v5_x)\n\ndef _maint_Comp1_Aggr1_add(_e):\n # Iterate {(v7_x, v7__av1) : v7_x in S, v7__av1 in deltamatch(Aggr1, 'b', _e, 1), (v7_x > v7__av1)}\n v7__av1 = _e\n for v7_x in S:\n if (v7_x > v7__av1):\n if (v7_x not in Comp1):\n Comp1.add(v7_x)\n else:\n Comp1.incref(v7_x)\n\ndef _maint_Comp1_Aggr1_remove(_e):\n # Iterate {(v8_x, v8__av1) : v8_x in S, v8__av1 in deltamatch(Aggr1, 'b', _e, 1), (v8_x > v8__av1)}\n v8__av1 = _e\n for v8_x in S:\n if (v8_x > v8__av1):\n if (Comp1.getref(v8_x) == 1):\n Comp1.remove(v8_x)\n else:\n Comp1.decref(v8_x)\n\nAggr1 = Set()\ndef _maint_Aggr1_add(_e):\n v1_v1 = _e\n if (() in _U_Aggr1):\n v1_val = _m_Aggr1_u.singlelookup(())\n v1_val = (v1_val + v1_v1)\n v1_elem = _m_Aggr1_u.singlelookup(())\n # Begin maint Comp1 before \"Aggr1.remove(v1_elem)\"\n _maint_Comp1_Aggr1_remove(v1_elem)\n # End maint Comp1 before \"Aggr1.remove(v1_elem)\"\n # Begin maint _m_Aggr1_u before \"Aggr1.remove(v1_elem)\"\n _maint__m_Aggr1_u_remove(v1_elem)\n # End maint _m_Aggr1_u before \"Aggr1.remove(v1_elem)\"\n Aggr1.remove(v1_elem)\n Aggr1.add(v1_val)\n # Begin maint _m_Aggr1_u after \"Aggr1.add(v1_val)\"\n _maint__m_Aggr1_u_add(v1_val)\n # End maint _m_Aggr1_u after \"Aggr1.add(v1_val)\"\n # Begin maint Comp1 after \"Aggr1.add(v1_val)\"\n _maint_Comp1_Aggr1_add(v1_val)\n # End maint Comp1 after \"Aggr1.add(v1_val)\"\n\ndef _maint_Aggr1_remove(_e):\n v2_v1 = _e\n if (() in _U_Aggr1):\n v2_val = _m_Aggr1_u.singlelookup(())\n v2_val = (v2_val - v2_v1)\n v2_elem = _m_Aggr1_u.singlelookup(())\n # Begin maint Comp1 before \"Aggr1.remove(v2_elem)\"\n _maint_Comp1_Aggr1_remove(v2_elem)\n # End maint Comp1 before \"Aggr1.remove(v2_elem)\"\n # Begin maint _m_Aggr1_u before \"Aggr1.remove(v2_elem)\"\n _maint__m_Aggr1_u_remove(v2_elem)\n # End maint _m_Aggr1_u before \"Aggr1.remove(v2_elem)\"\n Aggr1.remove(v2_elem)\n Aggr1.add(v2_val)\n # Begin maint _m_Aggr1_u after \"Aggr1.add(v2_val)\"\n _maint__m_Aggr1_u_add(v2_val)\n # End maint _m_Aggr1_u after \"Aggr1.add(v2_val)\"\n # Begin maint Comp1 after \"Aggr1.add(v2_val)\"\n _maint_Comp1_Aggr1_add(v2_val)\n # End maint Comp1 after \"Aggr1.add(v2_val)\"\n\n_U_Aggr1 = RCSet()\n_UEXT_Aggr1 = Set()\ndef demand_Aggr1():\n 'sum(R, None)'\n if (() not in _U_Aggr1):\n _U_Aggr1.add(())\n # Begin maint Aggr1 after \"_U_Aggr1.add(())\"\n v3_val = 0\n for v3_elem in (_m_R_u[()] if (() in _m_R_u) else set()):\n v3_val = (v3_val + v3_elem)\n Aggr1.add(v3_val)\n # Begin maint _m_Aggr1_u after \"Aggr1.add(v3_val)\"\n _maint__m_Aggr1_u_add(v3_val)\n # End maint _m_Aggr1_u after \"Aggr1.add(v3_val)\"\n # Begin maint Comp1 after \"Aggr1.add(v3_val)\"\n _maint_Comp1_Aggr1_add(v3_val)\n # End maint Comp1 after \"Aggr1.add(v3_val)\"\n # End maint Aggr1 after \"_U_Aggr1.add(())\"\n else:\n 
_U_Aggr1.incref(())\n\ndef undemand_Aggr1():\n 'sum(R, None)'\n if (_U_Aggr1.getref(()) == 1):\n # Begin maint Aggr1 before \"_U_Aggr1.remove(())\"\n v4_elem = _m_Aggr1_u.singlelookup(())\n # Begin maint Comp1 before \"Aggr1.remove(v4_elem)\"\n _maint_Comp1_Aggr1_remove(v4_elem)\n # End maint Comp1 before \"Aggr1.remove(v4_elem)\"\n # Begin maint _m_Aggr1_u before \"Aggr1.remove(v4_elem)\"\n _maint__m_Aggr1_u_remove(v4_elem)\n # End maint _m_Aggr1_u before \"Aggr1.remove(v4_elem)\"\n Aggr1.remove(v4_elem)\n # End maint Aggr1 before \"_U_Aggr1.remove(())\"\n _U_Aggr1.remove(())\n else:\n _U_Aggr1.decref(())\n\ndef query_Aggr1():\n 'sum(R, None)'\n if (() not in _UEXT_Aggr1):\n _UEXT_Aggr1.add(())\n demand_Aggr1()\n return True\n\nS = Set()\nfor e in [1, 5, 15, 20]:\n S.add(e)\n # Begin maint _m_S_w after \"S.add(e)\"\n _maint__m_S_w_add(e)\n # End maint _m_S_w after \"S.add(e)\"\n # Begin maint Aggr1_delta after \"S.add(e)\"\n _maint_Aggr1_delta_S_add(e)\n # End maint Aggr1_delta after \"S.add(e)\"\n # Begin maint Comp1 after \"S.add(e)\"\n _maint_Comp1_S_add(e)\n # End maint Comp1 after \"S.add(e)\"\n # Begin maint demand_Aggr1 after \"S.add(e)\"\n for _ in Aggr1_delta.elements():\n demand_Aggr1()\n Aggr1_delta.clear()\n # End maint demand_Aggr1 after \"S.add(e)\"\nfor e in [1, 2, 3, 4]:\n # Begin maint _m_R_u after \"R.add(e)\"\n _maint__m_R_u_add(e)\n # End maint _m_R_u after \"R.add(e)\"\n # Begin maint Aggr1 after \"R.add(e)\"\n _maint_Aggr1_add(e)\n # End maint Aggr1 after \"R.add(e)\"\nprint(sorted(Comp1))\nfor e in [1, 2, 3, 4]:\n # Begin maint Aggr1 before \"R.remove(e)\"\n _maint_Aggr1_remove(e)\n # End maint Aggr1 before \"R.remove(e)\"\n # Begin maint _m_R_u before \"R.remove(e)\"\n _maint__m_R_u_remove(e)\n # End maint _m_R_u before \"R.remove(e)\"\nprint(sorted(Comp1))" }, { "alpha_fraction": 0.3125794231891632, "alphanum_fraction": 0.3297331631183624, "avg_line_length": 38.84810256958008, "blob_id": "2a44608e5d663755f1e3c6e957f8ee7e77e2d9b1", "content_id": "51a11c435e71a22aa2e6164b873e04dbec327029", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6296, "license_type": "no_license", "max_line_length": 73, "num_lines": 158, "path": "/incoq/tests/invinc/demand/test_demtrans.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Unit tests for tags.py.\"\"\"\n\n\nimport unittest\n\nimport incoq.compiler.incast as L\nfrom incoq.compiler.central import CentralCase\nfrom incoq.compiler.demand.demtrans import *\n\n\nclass TransCase(CentralCase):\n \n def test_deminc(self):\n comp = L.pe('COMP({x for (x, y) in S for (y, z) in T}, [z], '\n '{\"uset_force\": False})')\n tree = L.p('''\n S.add(e)\n print(COMP)\n ''', subst={'COMP': comp})\n \n tree = deminc_relcomp(tree, self.manager, comp, 'Q')\n tree = L.elim_deadfuncs(tree, lambda n: n.startswith('_maint_'))\n \n exp_tree = L.p('''\n Q_dT = RCSet()\n def _maint_Q_dT_Q_Ty1_add(_e):\n for (v7_y, v7_z) in COMP({(v7_y, v7_z)\n for v7_y in deltamatch(Q_Ty1, 'b', _e, 1)\n for (v7_y, v7_z) in T},\n [], {'_deltaelem': '_e',\n '_deltalhs': 'v7_y',\n '_deltaop': 'add',\n '_deltarel': 'Q_Ty1',\n 'impl': 'auxonly'}):\n Q_dT.add((v7_y, v7_z))\n \n def _maint_Q_dT_Q_Ty1_remove(_e):\n for (v8_y, v8_z) in COMP({(v8_y, v8_z)\n for v8_y in deltamatch(Q_Ty1, 'b', _e, 1)\n for (v8_y, v8_z) in T},\n [], {'_deltaelem': '_e',\n '_deltalhs': 'v8_y',\n '_deltaop': 'remove',\n '_deltarel': 'Q_Ty1',\n 'impl': 'auxonly'}):\n Q_dT.remove((v8_y, v8_z))\n \n Q_Ty1 = RCSet()\n def 
_maint_Q_Ty1_S_add(_e):\n for (v5_x, v5_y) in COMP({(v5_x, v5_y)\n for (v5_x, v5_y) in deltamatch(S, 'bb', _e, 1)},\n [], {'_deltaelem': '_e',\n '_deltalhs': '(v5_x, v5_y)',\n '_deltaop': 'add',\n '_deltarel': 'S',\n 'impl': 'auxonly'}):\n if (v5_y not in Q_Ty1):\n with MAINT(Q_dT, 'after', 'Q_Ty1.add(v5_y)'):\n Q_Ty1.add(v5_y)\n _maint_Q_dT_Q_Ty1_add(v5_y)\n else:\n Q_Ty1.incref(v5_y)\n \n Q = RCSet()\n def _maint_Q_S_add(_e):\n for (v1_x, v1_y, v1_z) in COMP({(v1_x, v1_y, v1_z)\n for (v1_x, v1_y) in deltamatch(S, 'bb', _e, 1)\n for (v1_y, v1_z) in Q_dT},\n [], {'_deltaelem': '_e',\n '_deltalhs': '(v1_x, v1_y)',\n '_deltaop': 'add',\n '_deltarel': 'S',\n 'impl': 'auxonly'}):\n if ((v1_z, v1_x) not in Q):\n Q.add((v1_z, v1_x))\n else:\n Q.incref((v1_z, v1_x))\n \n with MAINT(Q, 'after', 'S.add(e)'):\n with MAINT(Q_Ty1, 'after', 'S.add(e)'):\n S.add(e)\n _maint_Q_Ty1_S_add(e)\n _maint_Q_S_add(e)\n print(setmatch(Q, 'bu', z))\n ''')\n \n self.assertEqual(tree, exp_tree)\n \n def test_deminc_nested(self):\n comp = L.pe(\n 'COMP({(x, y, z) for (x, y) in R '\n 'for (y, z) in DEMQUERY(foo, [y], A)}, '\n '[], '\n '{\"uset_force\": False})')\n tree = L.p('''\n R.add(e)\n print(COMP)\n ''', subst={'COMP': comp})\n tree = deminc_relcomp(tree, self.manager, comp, 'Q')\n tree = L.elim_deadfuncs(tree, lambda n: n.startswith('_maint_'))\n \n exp_tree = L.p('''\n foo_delta = RCSet()\n def _maint_foo_delta_Q_Ty_add(_e):\n for v7_y in COMP({v7_y\n for v7_y in deltamatch(Q_Ty, 'b', _e, 1)},\n [], {'_deltaelem': '_e',\n '_deltalhs': 'v7_y',\n '_deltaop': 'add',\n '_deltarel': 'Q_Ty',\n 'impl': 'auxonly'}):\n foo_delta.add(v7_y)\n \n Q_Ty = RCSet()\n def _maint_Q_Ty_R_add(_e):\n for (v5_x, v5_y) in COMP({(v5_x, v5_y)\n for (v5_x, v5_y) in deltamatch(R, 'bb', _e, 1)},\n [], {'_deltaelem': '_e',\n '_deltalhs': '(v5_x, v5_y)',\n '_deltaop': 'add',\n '_deltarel': 'R',\n 'impl': 'auxonly'}):\n if (v5_y not in Q_Ty):\n with MAINT(foo_delta, 'after', 'Q_Ty.add(v5_y)'):\n Q_Ty.add(v5_y)\n _maint_foo_delta_Q_Ty_add(v5_y)\n else:\n Q_Ty.incref(v5_y)\n \n Q = RCSet()\n def _maint_Q_R_add(_e):\n for (v1_x, v1_y, v1_z) in COMP({(v1_x, v1_y, v1_z)\n for (v1_x, v1_y) in deltamatch(R, 'bb', _e, 1)\n for (v1_y, v1_z) in A},\n [], {'_deltaelem': '_e',\n '_deltalhs': '(v1_x, v1_y)',\n '_deltaop': 'add',\n '_deltarel': 'R',\n 'impl': 'auxonly'}):\n Q.add((v1_x, v1_y, v1_z))\n \n with MAINT(demand_foo, 'after', 'R.add(e)'):\n with MAINT(Q, 'after', 'R.add(e)'):\n with MAINT(Q_Ty, 'after', 'R.add(e)'):\n R.add(e)\n _maint_Q_Ty_R_add(e)\n _maint_Q_R_add(e)\n for v9_y in foo_delta.elements():\n demand_foo(v9_y)\n foo_delta.clear()\n print(Q)\n ''')\n \n self.assertEqual(tree, exp_tree)\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.4649805426597595, "alphanum_fraction": 0.5077821016311646, "avg_line_length": 16.689655303955078, "blob_id": "1a8de841a07e98a26a303b3623ad42884ec42d57", "content_id": "5c2a198d61e2d162ffcc170920faa03c6bb4c025", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 514, "license_type": "no_license", "max_line_length": 71, "num_lines": 29, "path": "/incoq/tests/programs/aggr/nested/aggrdem_in.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "# Comprehension with a demand-driven aggregate.\n\nfrom incoq.runtime import *\n\nOPTIONS(\n default_impl = 'inc',\n)\n\nS = Set()\nE = Set()\n\nfor e in [1, 2, 4, 8]:\n S.add(e)\n\nfor e in [(1, 2), (1, 3), (2, 1), (3, 4), (8, 1), (8, 4)]:\n E.add(e)\n\nQUERYOPTIONS(\n 
'{y for x2, y in E if x2 == x}',\n uset_mode = 'all',\n impl = 'dem',\n)\n\nQUERYOPTIONS(\n '{x for x in S if x < sum({y for x2, y in E if x2 == x})}',\n impl = 'dem',\n)\n\nprint(sorted({x for x in S if x < sum({y for x2, y in E if x2 == x})}))\n\n" }, { "alpha_fraction": 0.75, "alphanum_fraction": 0.75, "avg_line_length": 28, "blob_id": "efafc5cd6337e2e3e0e3b7c1c1a7a40fc10017f3", "content_id": "b63f241c799aa0ca4ee28679c6214fa854861a5e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 28, "license_type": "no_license", "max_line_length": 28, "num_lines": 1, "path": "/experiments/rbac/constrainedrbac/__init__.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from .run_crbac_exp import *" }, { "alpha_fraction": 0.4019838273525238, "alphanum_fraction": 0.468546062707901, "avg_line_length": 33.83636474609375, "blob_id": "03b79474d28caa4389e8a67a8d9ff8431b987ec8", "content_id": "1f7dbf2037275fa3f69a1d5cd25c032ac5c5715b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3831, "license_type": "no_license", "max_line_length": 126, "num_lines": 110, "path": "/incoq/tests/programs/comp/sjaug_out.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from incoq.runtime import *\n# Comp1 := {z : (x, x) in E, (x, y) in E, (y, z) in S}\n_m_E_u1 = Map()\ndef _maint__m_E_u1_add(_e):\n (v11_1, v11_2) = _e\n if ((v11_1 == v11_2)):\n if (() not in _m_E_u1):\n _m_E_u1[()] = set()\n _m_E_u1[()].add(v11_1)\n\n_m_E_b1 = Map()\ndef _maint__m_E_b1_add(_e):\n (v9_1, v9_2) = _e\n if ((v9_1 == v9_2)):\n if (v9_1 not in _m_E_b1):\n _m_E_b1[v9_1] = set()\n _m_E_b1[v9_1].add(())\n\n_m_S_out = Map()\ndef _maint__m_S_out_add(_e):\n (v7_1, v7_2) = _e\n if (v7_1 not in _m_S_out):\n _m_S_out[v7_1] = set()\n _m_S_out[v7_1].add(v7_2)\n\ndef _maint__m_S_out_remove(_e):\n (v8_1, v8_2) = _e\n _m_S_out[v8_1].remove(v8_2)\n if (len(_m_S_out[v8_1]) == 0):\n del _m_S_out[v8_1]\n\n_m_E_out = Map()\ndef _maint__m_E_out_add(_e):\n (v5_1, v5_2) = _e\n if (v5_1 not in _m_E_out):\n _m_E_out[v5_1] = set()\n _m_E_out[v5_1].add(v5_2)\n\nComp1 = RCSet()\ndef _maint_Comp1_E_add(_e):\n # Iterate {(v1_x, v1_y, v1_z) : (v1_x, v1_x) in deltamatch(E, 'b1', _e, 0), (v1_x, v1_y) in (E + {_e}), (v1_y, v1_z) in S}\n for v1_x in setmatch({_e}, 'u1', ()):\n for v1_y in (_m_E_out[v1_x] if (v1_x in _m_E_out) else set()):\n for v1_z in (_m_S_out[v1_y] if (v1_y in _m_S_out) else set()):\n if (v1_z not in Comp1):\n Comp1.add(v1_z)\n else:\n Comp1.incref(v1_z)\n for v1_y in setmatch({_e}, 'bu', v1_x):\n for v1_z in (_m_S_out[v1_y] if (v1_y in _m_S_out) else set()):\n if (v1_z not in Comp1):\n Comp1.add(v1_z)\n else:\n Comp1.incref(v1_z)\n # Iterate {(v1_x, v1_y, v1_z) : (v1_x, v1_x) in E, (v1_x, v1_y) in deltamatch(E, 'bb', _e, 0), (v1_y, v1_z) in S}\n (v1_x, v1_y) = _e\n for _ in (_m_E_b1[v1_x] if (v1_x in _m_E_b1) else set()):\n for v1_z in (_m_S_out[v1_y] if (v1_y in _m_S_out) else set()):\n if (v1_z not in Comp1):\n Comp1.add(v1_z)\n else:\n Comp1.incref(v1_z)\n\ndef _maint_Comp1_S_add(_e):\n # Iterate {(v3_x, v3_y, v3_z) : (v3_x, v3_x) in E, (v3_x, v3_y) in E, (v3_y, v3_z) in deltamatch(S, 'bb', _e, 0)}\n (v3_y, v3_z) = _e\n for v3_x in (_m_E_u1[()] if (() in _m_E_u1) else set()):\n if ((v3_x, v3_y) in E):\n if (v3_z not in Comp1):\n Comp1.add(v3_z)\n else:\n Comp1.incref(v3_z)\n\ndef _maint_Comp1_S_remove(_e):\n # Iterate {(v4_x, v4_y, v4_z) : (v4_x, v4_x) in E, (v4_x, v4_y) in E, (v4_y, v4_z) in deltamatch(S, 
'bb', _e, 0)}\n (v4_y, v4_z) = _e\n for v4_x in (_m_E_u1[()] if (() in _m_E_u1) else set()):\n if ((v4_x, v4_y) in E):\n if (Comp1.getref(v4_z) == 1):\n Comp1.remove(v4_z)\n else:\n Comp1.decref(v4_z)\n\nE = Set()\n# Begin maint Comp1 before \"S.add((1, 2))\"\n_maint_Comp1_S_add((1, 2))\n# End maint Comp1 before \"S.add((1, 2))\"\n# Begin maint _m_S_out after \"S.add((1, 2))\"\n_maint__m_S_out_add((1, 2))\n# End maint _m_S_out after \"S.add((1, 2))\"\n# Begin maint Comp1 before \"E.add((1, 1))\"\n_maint_Comp1_E_add((1, 1))\n# End maint Comp1 before \"E.add((1, 1))\"\nE.add((1, 1))\n# Begin maint _m_E_u1 after \"E.add((1, 1))\"\n_maint__m_E_u1_add((1, 1))\n# End maint _m_E_u1 after \"E.add((1, 1))\"\n# Begin maint _m_E_b1 after \"E.add((1, 1))\"\n_maint__m_E_b1_add((1, 1))\n# End maint _m_E_b1 after \"E.add((1, 1))\"\n# Begin maint _m_E_out after \"E.add((1, 1))\"\n_maint__m_E_out_add((1, 1))\n# End maint _m_E_out after \"E.add((1, 1))\"\n# Begin maint _m_S_out before \"S.remove((1, 2))\"\n_maint__m_S_out_remove((1, 2))\n# End maint _m_S_out before \"S.remove((1, 2))\"\n# Begin maint Comp1 after \"S.remove((1, 2))\"\n_maint_Comp1_S_remove((1, 2))\n# End maint Comp1 after \"S.remove((1, 2))\"\nprint(sorted(Comp1))" }, { "alpha_fraction": 0.46922463178634644, "alphanum_fraction": 0.5411670804023743, "avg_line_length": 35.27536392211914, "blob_id": "8a9387fad5ed1fc825af808f4bd529866cd1a74f", "content_id": "26f70b63715737868ba3d063d9f3031c85675c58", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2502, "license_type": "no_license", "max_line_length": 85, "num_lines": 69, "path": "/incoq/tests/programs/aggr/tuple_out.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from incoq.runtime import *\n# Aggr1 := count(R, None)\n# Aggr2 := count(setmatch(R, 'buu', a), None)\n_m_Aggr1_u = Map()\ndef _maint__m_Aggr1_u_add(_e):\n v7_1 = _e\n if (() not in _m_Aggr1_u):\n _m_Aggr1_u[()] = set()\n _m_Aggr1_u[()].add(v7_1)\n\ndef _maint__m_Aggr1_u_remove(_e):\n v8_1 = _e\n _m_Aggr1_u[()].remove(v8_1)\n if (len(_m_Aggr1_u[()]) == 0):\n del _m_Aggr1_u[()]\n\n_m_Aggr2_out = Map()\ndef _maint__m_Aggr2_out_add(_e):\n (v5_1, v5_2) = _e\n if (v5_1 not in _m_Aggr2_out):\n _m_Aggr2_out[v5_1] = set()\n _m_Aggr2_out[v5_1].add(v5_2)\n\ndef _maint__m_Aggr2_out_remove(_e):\n (v6_1, v6_2) = _e\n _m_Aggr2_out[v6_1].remove(v6_2)\n if (len(_m_Aggr2_out[v6_1]) == 0):\n del _m_Aggr2_out[v6_1]\n\ndef _maint_Aggr2_add(_e):\n (v3_v1, v3_v2, v3_v3) = _e\n v3_val = _m_Aggr2_out.singlelookup(v3_v1, (0, 0))\n (v3_state, v3_count) = v3_val\n v3_state = (v3_state + 1)\n v3_val = (v3_state, (v3_count + 1))\n v3_1 = v3_v1\n if (not (len((_m_Aggr2_out[v3_v1] if (v3_v1 in _m_Aggr2_out) else set())) == 0)):\n v3_elem = _m_Aggr2_out.singlelookup(v3_v1)\n # Begin maint _m_Aggr2_out before \"Aggr2.remove((v3_1, v3_elem))\"\n _maint__m_Aggr2_out_remove((v3_1, v3_elem))\n # End maint _m_Aggr2_out before \"Aggr2.remove((v3_1, v3_elem))\"\n # Begin maint _m_Aggr2_out after \"Aggr2.add((v3_1, v3_val))\"\n _maint__m_Aggr2_out_add((v3_1, v3_val))\n # End maint _m_Aggr2_out after \"Aggr2.add((v3_1, v3_val))\"\n\ndef _maint_Aggr1_add(_e):\n v1_val = _m_Aggr1_u.singlelookup((), (0, 0))\n (v1_state, v1_count) = v1_val\n v1_state = (v1_state + 1)\n v1_val = (v1_state, (v1_count + 1))\n if (not (len((_m_Aggr1_u[()] if (() in _m_Aggr1_u) else set())) == 0)):\n v1_elem = _m_Aggr1_u.singlelookup(())\n # Begin maint _m_Aggr1_u before \"Aggr1.remove(v1_elem)\"\n 
_maint__m_Aggr1_u_remove(v1_elem)\n # End maint _m_Aggr1_u before \"Aggr1.remove(v1_elem)\"\n # Begin maint _m_Aggr1_u after \"Aggr1.add(v1_val)\"\n _maint__m_Aggr1_u_add(v1_val)\n # End maint _m_Aggr1_u after \"Aggr1.add(v1_val)\"\n\nfor (x, y, z) in [(1, 2, 3), (1, 4, 5), (6, 7, 8)]:\n # Begin maint Aggr2 after \"R.add((x, y, z))\"\n _maint_Aggr2_add((x, y, z))\n # End maint Aggr2 after \"R.add((x, y, z))\"\n # Begin maint Aggr1 after \"R.add((x, y, z))\"\n _maint_Aggr1_add((x, y, z))\n # End maint Aggr1 after \"R.add((x, y, z))\"\na = 1\nprint(_m_Aggr1_u.singlelookup((), (0, 0))[0])\nprint(_m_Aggr2_out.singlelookup(a, (0, 0))[0])" }, { "alpha_fraction": 0.45086705684661865, "alphanum_fraction": 0.5183044075965881, "avg_line_length": 21.565217971801758, "blob_id": "b3f5e9deabcf7178258137627bef2898f4d25005", "content_id": "ee9a06f8a386125a8e6065ee9d6a16f8e04affa7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 519, "license_type": "no_license", "max_line_length": 83, "num_lines": 23, "path": "/incoq/tests/programs/comp/deltawildeq_in.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "# Check handling of deltas to enumerators with wildcards.\n\nfrom incoq.runtime import *\n\nQUERYOPTIONS(\n '{(x, w) for (x, x2, z) in S for (x3, w) in T if x == x2 if x2 == x3}',\n impl = 'inc',\n)\n\nS = Set()\nT = Set()\n\nfor v1, v2 in [(1, 3)]:\n T.add((v1, v2))\n\nfor v1, v2, v3 in [(1, 2, 2), (1, 1, 2)]:\n S.add((v1, v2, v3))\n\nprint(sorted({(x, w) for (x, x2, z) in S for (x3, w) in T if x == x2 if x2 == x3}))\n\nT.remove((1, 3))\n\nprint(sorted({(x, w) for (x, x2, z) in S for (x3, w) in T if x == x2 if x2 == x3}))\n" }, { "alpha_fraction": 0.75, "alphanum_fraction": 0.75, "avg_line_length": 31, "blob_id": "6b5c7b296a484baec2e8fe4a01412976764871a1", "content_id": "11533f2a17cfce64de50c7feaef09e892ceab206", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 32, "license_type": "no_license", "max_line_length": 31, "num_lines": 1, "path": "/experiments/distalgo/__init__.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from .run_distalgo_exp import *\n" }, { "alpha_fraction": 0.7722772359848022, "alphanum_fraction": 0.7722772359848022, "avg_line_length": 32.66666793823242, "blob_id": "2c40da7fb1ae38f3be5524c48717e0216710a796", "content_id": "57bf7fbcc4abdd83e3c5296a0bbc77373cfc6661", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 101, "license_type": "no_license", "max_line_length": 45, "num_lines": 3, "path": "/TODO.md", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "# Refactoring #\n- move unparser from incast into iast library\n- move mask stuff from set into incast\n" }, { "alpha_fraction": 0.4651122987270355, "alphanum_fraction": 0.5276873111724854, "avg_line_length": 37.3815803527832, "blob_id": "866ffe21ab8109be53e15bab0d4cdedb4546209f", "content_id": "300d6233b401311f28742ea15f814c3180888ea3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5833, "license_type": "no_license", "max_line_length": 140, "num_lines": 152, "path": "/incoq/tests/programs/deminc/objwild_out.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from incoq.runtime import *\n# Comp1 := {(S, S) : S in _U_Comp1, (S, _) in _M}\n# Comp1_TS := {S : S in _U_Comp1}\n# Comp1_d_M := {(S, _v1) : S in Comp1_TS, (S, _v1) in 
_M}\n_m_Comp1_out = Map()\ndef _maint__m_Comp1_out_add(_e):\n (v13_1, v13_2) = _e\n if (v13_1 not in _m_Comp1_out):\n _m_Comp1_out[v13_1] = set()\n _m_Comp1_out[v13_1].add(v13_2)\n\ndef _maint__m_Comp1_out_remove(_e):\n (v14_1, v14_2) = _e\n _m_Comp1_out[v14_1].remove(v14_2)\n if (len(_m_Comp1_out[v14_1]) == 0):\n del _m_Comp1_out[v14_1]\n\n_m_Comp1_d_M_bw = Map()\ndef _maint__m_Comp1_d_M_bw_add(_e):\n (v11_1, v11_2) = _e\n if (v11_1 not in _m_Comp1_d_M_bw):\n _m_Comp1_d_M_bw[v11_1] = RCSet()\n if (() not in _m_Comp1_d_M_bw[v11_1]):\n _m_Comp1_d_M_bw[v11_1].add(())\n else:\n _m_Comp1_d_M_bw[v11_1].incref(())\n\ndef _maint__m_Comp1_d_M_bw_remove(_e):\n (v12_1, v12_2) = _e\n if (_m_Comp1_d_M_bw[v12_1].getref(()) == 1):\n _m_Comp1_d_M_bw[v12_1].remove(())\n else:\n _m_Comp1_d_M_bw[v12_1].decref(())\n if (len(_m_Comp1_d_M_bw[v12_1]) == 0):\n del _m_Comp1_d_M_bw[v12_1]\n\ndef _maint_Comp1_d_M_Comp1_TS_add(_e):\n # Iterate {(v7_S, v7__v1) : v7_S in deltamatch(Comp1_TS, 'b', _e, 1), (v7_S, v7__v1) in _M}\n v7_S = _e\n if isinstance(v7_S, Set):\n for v7__v1 in v7_S:\n # Begin maint _m_Comp1_d_M_bw after \"Comp1_d_M.add((v7_S, v7__v1))\"\n _maint__m_Comp1_d_M_bw_add((v7_S, v7__v1))\n # End maint _m_Comp1_d_M_bw after \"Comp1_d_M.add((v7_S, v7__v1))\"\n\ndef _maint_Comp1_d_M_Comp1_TS_remove(_e):\n # Iterate {(v8_S, v8__v1) : v8_S in deltamatch(Comp1_TS, 'b', _e, 1), (v8_S, v8__v1) in _M}\n v8_S = _e\n if isinstance(v8_S, Set):\n for v8__v1 in v8_S:\n # Begin maint _m_Comp1_d_M_bw before \"Comp1_d_M.remove((v8_S, v8__v1))\"\n _maint__m_Comp1_d_M_bw_remove((v8_S, v8__v1))\n # End maint _m_Comp1_d_M_bw before \"Comp1_d_M.remove((v8_S, v8__v1))\"\n\ndef _maint_Comp1_d_M__M_add(_e):\n # Iterate {(v9_S, v9__v1) : v9_S in Comp1_TS, (v9_S, v9__v1) in deltamatch(_M, 'bb', _e, 1)}\n (v9_S, v9__v1) = _e\n if (v9_S in Comp1_TS):\n # Begin maint _m_Comp1_d_M_bw after \"Comp1_d_M.add((v9_S, v9__v1))\"\n _maint__m_Comp1_d_M_bw_add((v9_S, v9__v1))\n # End maint _m_Comp1_d_M_bw after \"Comp1_d_M.add((v9_S, v9__v1))\"\n\nComp1_TS = RCSet()\ndef _maint_Comp1_TS__U_Comp1_add(_e):\n # Iterate {v5_S : v5_S in deltamatch(_U_Comp1, 'b', _e, 1)}\n v5_S = _e\n Comp1_TS.add(v5_S)\n # Begin maint Comp1_d_M after \"Comp1_TS.add(v5_S)\"\n _maint_Comp1_d_M_Comp1_TS_add(v5_S)\n # End maint Comp1_d_M after \"Comp1_TS.add(v5_S)\"\n\ndef _maint_Comp1_TS__U_Comp1_remove(_e):\n # Iterate {v6_S : v6_S in deltamatch(_U_Comp1, 'b', _e, 1)}\n v6_S = _e\n # Begin maint Comp1_d_M before \"Comp1_TS.remove(v6_S)\"\n _maint_Comp1_d_M_Comp1_TS_remove(v6_S)\n # End maint Comp1_d_M before \"Comp1_TS.remove(v6_S)\"\n Comp1_TS.remove(v6_S)\n\ndef _maint_Comp1__U_Comp1_add(_e):\n # Iterate {v1_S : v1_S in deltamatch(_U_Comp1, 'b', _e, 1), (v1_S, _) in _M}\n v1_S = _e\n if isinstance(v1_S, Set):\n if (not (len(v1_S) == 0)):\n # Begin maint _m_Comp1_out after \"Comp1.add((v1_S, v1_S))\"\n _maint__m_Comp1_out_add((v1_S, v1_S))\n # End maint _m_Comp1_out after \"Comp1.add((v1_S, v1_S))\"\n\ndef _maint_Comp1__U_Comp1_remove(_e):\n # Iterate {v2_S : v2_S in deltamatch(_U_Comp1, 'b', _e, 1), (v2_S, _) in _M}\n v2_S = _e\n if isinstance(v2_S, Set):\n if (not (len(v2_S) == 0)):\n # Begin maint _m_Comp1_out before \"Comp1.remove((v2_S, v2_S))\"\n _maint__m_Comp1_out_remove((v2_S, v2_S))\n # End maint _m_Comp1_out before \"Comp1.remove((v2_S, v2_S))\"\n\ndef _maint_Comp1__M_add(_e):\n # Iterate {v3_S : v3_S in _U_Comp1, (v3_S, _) in deltamatch(Comp1_d_M, 'bw', _e, 1), (v3_S, _) in Comp1_d_M}\n for v3_S in setmatch(({_e} if ((_m_Comp1_d_M_bw[_e[0]] if (_e[0] in 
_m_Comp1_d_M_bw) else RCSet()).getref(()) == 1) else {}), 'uw', ()):\n if (v3_S in _U_Comp1):\n for _ in (_m_Comp1_d_M_bw[v3_S] if (v3_S in _m_Comp1_d_M_bw) else RCSet()):\n # Begin maint _m_Comp1_out after \"Comp1.add((v3_S, v3_S))\"\n _maint__m_Comp1_out_add((v3_S, v3_S))\n # End maint _m_Comp1_out after \"Comp1.add((v3_S, v3_S))\"\n\n_U_Comp1 = RCSet()\n_UEXT_Comp1 = Set()\ndef demand_Comp1(S):\n '{(S, S) : S in _U_Comp1, (S, _) in _M}'\n if (S not in _U_Comp1):\n _U_Comp1.add(S)\n # Begin maint Comp1_TS after \"_U_Comp1.add(S)\"\n _maint_Comp1_TS__U_Comp1_add(S)\n # End maint Comp1_TS after \"_U_Comp1.add(S)\"\n # Begin maint Comp1 after \"_U_Comp1.add(S)\"\n _maint_Comp1__U_Comp1_add(S)\n # End maint Comp1 after \"_U_Comp1.add(S)\"\n else:\n _U_Comp1.incref(S)\n\ndef undemand_Comp1(S):\n '{(S, S) : S in _U_Comp1, (S, _) in _M}'\n if (_U_Comp1.getref(S) == 1):\n # Begin maint Comp1 before \"_U_Comp1.remove(S)\"\n _maint_Comp1__U_Comp1_remove(S)\n # End maint Comp1 before \"_U_Comp1.remove(S)\"\n # Begin maint Comp1_TS before \"_U_Comp1.remove(S)\"\n _maint_Comp1_TS__U_Comp1_remove(S)\n # End maint Comp1_TS before \"_U_Comp1.remove(S)\"\n _U_Comp1.remove(S)\n else:\n _U_Comp1.decref(S)\n\ndef query_Comp1(S):\n '{(S, S) : S in _U_Comp1, (S, _) in _M}'\n if (S not in _UEXT_Comp1):\n _UEXT_Comp1.add(S)\n demand_Comp1(S)\n return True\n\nS = Set()\no = Obj()\no.a = 1\nS.add(o)\n# Begin maint Comp1_d_M after \"_M.add((S, o))\"\n_maint_Comp1_d_M__M_add((S, o))\n# End maint Comp1_d_M after \"_M.add((S, o))\"\n# Begin maint Comp1 after \"_M.add((S, o))\"\n_maint_Comp1__M_add((S, o))\n# End maint Comp1 after \"_M.add((S, o))\"\nprint(len_((query_Comp1(S) and (_m_Comp1_out[S] if (S in _m_Comp1_out) else set()))))" }, { "alpha_fraction": 0.4454001486301422, "alphanum_fraction": 0.4577412009239197, "avg_line_length": 31.609756469726562, "blob_id": "5ef962777c6e765538839020fd5c581515a1d085", "content_id": "791462afba6dad70c437c31342f6b112ffe862ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2674, "license_type": "no_license", "max_line_length": 76, "num_lines": 82, "path": "/incoq/tests/invinc/aggr/test_aggrcomp.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Unit tests for aggrcomp.py.\"\"\"\n\n\nimport unittest\n\nimport incoq.compiler.incast as L\nfrom incoq.compiler.central import CentralCase\nfrom incoq.compiler.aggr.aggrcomp import *\nfrom incoq.compiler.aggr.aggrcomp import LookupReplacer\n\n\nclass AggrcompCase(CentralCase):\n \n def test_replacer(self):\n look = L.pe('R.smlookup(\"bu\", x)')\n dem1 = L.pe('DEMQUERY(foo, [y], R.smlookup(\"bu\", y))')\n dem2 = L.pe('DEMQUERY(bar, [z], R.smlookup(\"bu\", z))')\n \n tree = L.pe('x + LOOK + DEM1 + DEM1 + DEM2',\n subst={'LOOK': look, 'DEM1': dem1, 'DEM2': dem2})\n namer = L.NameGenerator()\n replacer = LookupReplacer(namer)\n tree, clauses = replacer.process(tree)\n repls = replacer.repls\n \n exp_tree = L.pe('x + v1 + v2 + v2 + v3')\n exp_clauses = [\n L.Enumerator(L.sn('v1'),\n L.pe('{R.smlookup(\"bu\", x)}')),\n L.Enumerator(L.sn('v2'),\n L.pe('DEMQUERY(foo, [y], {R.smlookup(\"bu\", y)})')),\n L.Enumerator(L.sn('v3'),\n L.pe('DEMQUERY(bar, [z], {R.smlookup(\"bu\", z)})')),\n ]\n exp_repls = {\n look: 'v1',\n dem1: 'v2',\n dem2: 'v3',\n }\n \n self.assertEqual(tree, exp_tree)\n self.assertEqual(clauses, exp_clauses)\n self.assertEqual(repls, exp_repls)\n \n def test_flatten_smlookups_nodem(self):\n comp = L.pe(\n 'COMP({x for x in S '\n 'if 
Aggr1.smlookup(\"u\", ()) > 5}, '\n '[], {})')\n comp = flatten_smlookups(comp)\n # Ensure idempotence. We don't want to mess up an enumerator\n # in a maintenance comprehension.\n comp = flatten_smlookups(comp)\n \n exp_comp = L.pe(\n 'COMP({x for x in S '\n 'for _av1 in {Aggr1.smlookup(\"u\", ())} '\n 'if (_av1 > 5)}, '\n '[], {})')\n \n self.assertEqual(comp, exp_comp)\n \n def test_flatten_smlookups_dem(self):\n comp = L.pe(\n 'COMP({x for x in S '\n 'if DEMQUERY(foo, [u], Aggr1.smlookup(\"u\", ())) > 5}, '\n '[], {})')\n comp = flatten_smlookups(comp)\n comp = flatten_smlookups(comp)\n \n exp_comp = L.pe(\n 'COMP({x for x in S '\n 'for _av1 in DEMQUERY(foo, [u], '\n '{Aggr1.smlookup(\"u\", ())}) '\n 'if (_av1 > 5)}, '\n '[], {})')\n \n self.assertEqual(comp, exp_comp)\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.500220537185669, "alphanum_fraction": 0.5593295097351074, "avg_line_length": 35, "blob_id": "0a4f2840f0ae74c3244a932a7aec9ac87f9d8073", "content_id": "f6e1abd3a0a5cf20cca931f5ed54c57aa65a6bd4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2267, "license_type": "no_license", "max_line_length": 75, "num_lines": 63, "path": "/incoq/tests/programs/aggr/basic_out.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from incoq.runtime import *\n# Aggr1 := sum(R, None)\n_m_Aggr1_u = Map()\ndef _maint__m_Aggr1_u_add(_e):\n v3_1 = _e\n if (() not in _m_Aggr1_u):\n _m_Aggr1_u[()] = set()\n _m_Aggr1_u[()].add(v3_1)\n\ndef _maint__m_Aggr1_u_remove(_e):\n v4_1 = _e\n _m_Aggr1_u[()].remove(v4_1)\n if (len(_m_Aggr1_u[()]) == 0):\n del _m_Aggr1_u[()]\n\ndef _maint_Aggr1_add(_e):\n v1_v1 = _e\n v1_val = _m_Aggr1_u.singlelookup((), (0, 0))\n (v1_state, v1_count) = v1_val\n v1_state = (v1_state + v1_v1)\n v1_val = (v1_state, (v1_count + 1))\n if (not (len((_m_Aggr1_u[()] if (() in _m_Aggr1_u) else set())) == 0)):\n v1_elem = _m_Aggr1_u.singlelookup(())\n # Begin maint _m_Aggr1_u before \"Aggr1.remove(v1_elem)\"\n _maint__m_Aggr1_u_remove(v1_elem)\n # End maint _m_Aggr1_u before \"Aggr1.remove(v1_elem)\"\n # Begin maint _m_Aggr1_u after \"Aggr1.add(v1_val)\"\n _maint__m_Aggr1_u_add(v1_val)\n # End maint _m_Aggr1_u after \"Aggr1.add(v1_val)\"\n\ndef _maint_Aggr1_remove(_e):\n v2_v1 = _e\n v2_val = _m_Aggr1_u.singlelookup(())\n if (v2_val[1] == 1):\n v2_elem = _m_Aggr1_u.singlelookup(())\n # Begin maint _m_Aggr1_u before \"Aggr1.remove(v2_elem)\"\n _maint__m_Aggr1_u_remove(v2_elem)\n # End maint _m_Aggr1_u before \"Aggr1.remove(v2_elem)\"\n else:\n (v2_state, v2_count) = v2_val\n v2_state = (v2_state - v2_v1)\n v2_val = (v2_state, (v2_count - 1))\n v2_elem = _m_Aggr1_u.singlelookup(())\n # Begin maint _m_Aggr1_u before \"Aggr1.remove(v2_elem)\"\n _maint__m_Aggr1_u_remove(v2_elem)\n # End maint _m_Aggr1_u before \"Aggr1.remove(v2_elem)\"\n # Begin maint _m_Aggr1_u after \"Aggr1.add(v2_val)\"\n _maint__m_Aggr1_u_add(v2_val)\n # End maint _m_Aggr1_u after \"Aggr1.add(v2_val)\"\n\nfor x in [1, 2, 3, 4, 5]:\n # Begin maint Aggr1 after \"R.add(x)\"\n _maint_Aggr1_add(x)\n # End maint Aggr1 after \"R.add(x)\"\n# Begin maint Aggr1 before \"R.remove(5)\"\n_maint_Aggr1_remove(5)\n# End maint Aggr1 before \"R.remove(5)\"\nprint(_m_Aggr1_u.singlelookup((), (0, 0))[0])\nfor x in [1, 2, 3, 4]:\n # Begin maint Aggr1 before \"R.remove(x)\"\n _maint_Aggr1_remove(x)\n # End maint Aggr1 before \"R.remove(x)\"\nprint(_m_Aggr1_u.singlelookup((), (0, 0))[0])" }, { "alpha_fraction": 0.6326580047607422, 
"alphanum_fraction": 0.644717812538147, "avg_line_length": 26.098039627075195, "blob_id": "fb18dfa39e4fe8bea03e17016dab13d9e006cc07", "content_id": "c7656ed05657164e5bf833543cd582337374a963", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4146, "license_type": "no_license", "max_line_length": 70, "num_lines": 153, "path": "/experiments/run.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "import os\nimport traceback\n\nimport experiments.twitter as twitter\nimport experiments.jql as jql\nimport experiments.wifi as wifi\nimport experiments.django as django\nimport experiments.rbac.corerbac as corerbac\nimport experiments.rbac.constrainedrbac as crbac\nimport experiments.distalgo\n\nscaletime = twitter.ScaleTime()\nscaletime_poster = twitter.ScaleTimePoster()\nscalesize = twitter.ScaleSize()\nscalesize_poster = twitter.ScaleSizePoster()\n\ndemandtime = twitter.DemandTimeOps()\ndemandsize = twitter.DemandSize()\n\nfactor1a = twitter.Factor1ATimeNorm()\nfactor1b = twitter.Factor1BTimeNorm()\nfactor1c = twitter.Factor1CTimeNorm()\nfactor1d = twitter.Factor1DTimeNorm()\nfactor2a = twitter.Factor2ATimeNorm()\nfactor2b = twitter.Factor2BTimeNorm()\nfactor2c = twitter.Factor2CTimeNorm()\nfactor2d = twitter.Factor2DTimeNorm()\n\ntag = twitter.TagTime()\n\njqlratio1 = jql.Ratio1()\njqlratio2 = jql.Ratio2()\njqlratio3 = jql.Ratio3()\njqlscale1 = jql.Scale1()\njqlscale2 = jql.Scale2()\njqlscale2bigger = jql.Scale2Bigger()\njqlscale3 = jql.Scale3()\njqlscale3bigger = jql.Scale3Bigger()\n\nwifiscale = wifi.Wifi()\n\ndjangoscale = django.Scale()\ndjangodemand = django.DemandTime()\ndjangodemandnorm = django.DemandTimeNorm()\n\ncoreroles = corerbac.CoreRoles()\ncoredemand = corerbac.CoreDemand()\ncoredemandnorm = corerbac.CoreDemandNorm()\ncrbacscale = crbac.CRBACScale()\n\nclpaxos = experiments.distalgo.CLPaxos()\ncrleader = experiments.distalgo.CRLeader()\ndscrash = experiments.distalgo.DSCrash()\nhsleader = experiments.distalgo.HSLeader()\nlamutexspecprocs = experiments.distalgo.LAMutexSpecProcs()\nlamutexspecrounds = experiments.distalgo.LAMutexSpecRounds()\nlamutexspecoptprocs = experiments.distalgo.LAMutexSpecOptProcs()\nlamutexspecoptrounds = experiments.distalgo.LAMutexSpecOptRounds()\nlamutexorigprocs = experiments.distalgo.LAMutexOrigProcs()\nlamutexorigrounds = experiments.distalgo.LAMutexOrigRounds()\nlapaxos = experiments.distalgo.LAPaxos()\nramutex = experiments.distalgo.RAMutex()\nratokenprocs = experiments.distalgo.RATokenProcs()\nratokenrounds = experiments.distalgo.RATokenRounds()\nsktoken = experiments.distalgo.SKToken()\ntpcommit = experiments.distalgo.TPCommit()\nvrpaxos = experiments.distalgo.VRPaxos()\n\ndef main():\n \n ws = [\n # DLS14 figures\n# scaletime, # celeb_asymp_time\n# scalesize, # celeb_asymp_space\n# demandtime, # celeb_demand_time\n# demandsize, # celeb_demand_space\n# tag, # osq_auth_strategy\n \n# scaletime_poster,\n# scalesize_poster,\n \n # Other benchmarks:\n# factor1a,\n# factor1b,\n# factor1c,\n# factor1d,\n# factor2a,\n# factor2b,\n# factor2c,\n# factor2d,\n \n# jqlratio1,\n# jqlratio2, # jql_ratio\n# jqlratio3,\n# jqlscale1,\n# jqlscale2, # jql_asymp\n# jqlscale2bigger,\n# jqlscale3,\n# jqlscale3bigger,\n \n# coreroles,\n# coredemand,\n# coredemandnorm,\n \n# crbacscale,\n \n# wifiscale,\n# djangoscale,\n# djangodemand,\n# djangodemandnorm,\n \n# clpaxos,\n# crleader,\n# dscrash,\n# hsleader,\n# lamutexspecprocs,\n# lamutexspecrounds,\n# lamutexspecoptprocs,\n# 
lamutexspecoptrounds,\n# lamutexorigprocs,\n# lamutexorigrounds,\n# lapaxos,\n# ramutex,\n# ratokenprocs,\n# ratokenrounds\n# sktoken,\n# tpcommit,\n# vrpaxos,\n ]\n \n # Change to directory of this file so we can find the\n # results/ subdirectory.\n os.chdir(os.path.join('.', os.path.dirname(__file__)))\n \n for w in ws:\n print('\\n---- Running {} ----\\n'.format(w.__class__.__name__))\n try:\n# w.generate()\n# w.benchmark()\n \n# w.verify()\n \n w.extract()\n w.view()\n \n# w.cleanup()\n except Exception:\n traceback.print_exc()\n print('\\n^--- Skipping test\\n')\n\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.5126582384109497, "alphanum_fraction": 0.550632894039154, "avg_line_length": 17.58823585510254, "blob_id": "63453ac47d93164b78ff0df2d3a76f3d77fc2b80", "content_id": "43e5e841542dcd94cae24a0258a00e9f94f3ec60", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 316, "license_type": "no_license", "max_line_length": 61, "num_lines": 17, "path": "/incoq/tests/programs/comp/inconlyonce_in.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "# Don't incrementalize the same comp more than once if we can\n# avoid it.\n\nfrom incoq.runtime import *\n\nQUERYOPTIONS(\n '{x for (x, y) in E}',\n impl = 'inc',\n)\n\nE = Set()\n\nfor v1, v2 in [(1, 2), (1, 3), (2, 3), (3, 4)]:\n E.add((v1, v2))\n\nprint(sorted({x for (x, y) in E}))\nprint(sorted({x for (x, y) in E}))\n" }, { "alpha_fraction": 0.6851851940155029, "alphanum_fraction": 0.7037037014961243, "avg_line_length": 17, "blob_id": "be21190267f7b80a347ba922345a35074d7b48dc", "content_id": "dc1e76d953665c5ee5927d45665a513e84eef908", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 54, "license_type": "no_license", "max_line_length": 28, "num_lines": 3, "path": "/experiments/jql/jql_1_orig.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from .jql_1_in import *\n\ndo_query_nodemand = do_query\n" }, { "alpha_fraction": 0.47742369771003723, "alphanum_fraction": 0.48481228947639465, "avg_line_length": 28.44835090637207, "blob_id": "9d28561f55c75885e0a055cce6efa258fff0ee59", "content_id": "447340810ae881fb2e6ae8b2c5ccbcd413438c96", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13399, "license_type": "no_license", "max_line_length": 71, "num_lines": 455, "path": "/experiments/django/run_django_exp.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Run the OSQ-django experiments.\"\"\"\n\n\nimport os, sys, importlib\nfrom copy import deepcopy\nfrom random import randrange\n\nfrom frexp import (ExpWorkflow, Datagen,\n MetricExtractor, NormalizedExtractor)\n\nfrom experiments.util import SmallExtractor, LargeExtractor, canonize\n\n\nclass DjangoDatagen(Datagen):\n \n \"\"\"Test procedure follows OSQ section 9, authentication query.\n \n The user to query after each update is chosen randomly.\n Obviously, the group is marked active.\n \n Additional facility for only querying over a certain size\n subset of all users.\n \n Parameters:\n n_users -- number of users\n n_q_users -- number of queryable users.\n n_perms -- number of permissions, add operations, and queries\n \"\"\"\n \n def generate(self, P):\n n_users = P['n_users']\n n_q_users = P['n_q_users']\n n_perms = P['n_perms']\n \n assert n_q_users <= n_users\n \n qseq = [randrange(n_q_users) for _ in range(n_perms)]\n \n return dict(\n dsparams = 
P,\n qseq = qseq,\n )\n\n\nclass DjangoDriver:\n \n def __init__(self, pipe_filename):\n import gc\n import pickle\n \n gc.disable()\n \n with open(pipe_filename, 'rb') as pf:\n dataset, prog, other_tparams = pickle.load(pf)\n os.remove(pipe_filename)\n \n \n self.dataset = dataset\n self.prog = prog\n self.simplified_query = '_simp_' in prog\n self.module = None\n self.results = {}\n self.qseq = dataset['qseq']\n \n self.setUp()\n \n from frexp.util import StopWatch, user_time\n from time import process_time, perf_counter\n timer_user = StopWatch(user_time)\n timer_cpu = StopWatch(process_time)\n timer_wall = StopWatch(perf_counter)\n \n with timer_user, timer_cpu, timer_wall:\n self.run()\n \n import incoq.runtime\n self.results['size'] = incoq.runtime.get_total_structure_size(\n self.module.__dict__)\n self.results['time_user'] = timer_user.consume()\n self.results['time_cpu'] = timer_cpu.consume()\n self.results['time_wall'] = timer_wall.consume()\n \n self.results['stdmetric'] = self.results['time_cpu']\n \n self.tearDown()\n \n \n with open(pipe_filename, 'wb') as pf:\n pickle.dump(self.results, pf)\n \n def setUp(self):\n # Import driven program.\n dirname, filename = os.path.split(self.prog)\n if dirname:\n sys.path.append(dirname)\n try:\n self.module = importlib.import_module(\n 'experiments.django.' + filename)\n finally:\n if dirname:\n sys.path.pop()\n \n \n m = self.module\n ds = self.dataset\n \n # Populate dataset.\n self.group = m.make_group(True)\n self.users = []\n for i in range(ds['dsparams']['n_users']):\n u = m.make_user('u' + str(i))\n self.users.append(u)\n m.add_group(u, self.group)\n self.perms = []\n for i in range(ds['dsparams']['n_perms']):\n p = m.make_perm('p' + str(i))\n self.perms.append(p)\n \n # Preprocess operations.\n self.ops = []\n assert len(self.qseq) == len(self.perms)\n for i, ui in enumerate(self.qseq):\n # u is either a user or uid, depending on prog.\n u = self.users[ui]\n if not self.simplified_query:\n u = u.id\n self.ops.append((self.perms[i], u))\n \n # Query over each user that's ever queried\n # We should only have to query once to get users in the\n # U-set, but let's query each individual user just in\n # case the U-set strategy is altered.\n for _, u in self.ops:\n m.do_query(u)\n \n def run(self):\n g = self.group\n add_perm = self.module.add_perm\n do_query = self.module.do_query_nodemand\n \n for p, u in self.ops:\n add_perm(g, p)\n # For the implementations using the normal query,\n # u is a user uid. 
For the simplified query, it's\n # the actual user object itself.\n do_query(u)\n \n def tearDown(self):\n pass\n\nclass DjangoVerifyDriver:\n \n def __init__(self, pipe_filename):\n import gc\n import pickle\n \n gc.disable()\n \n with open(pipe_filename, 'rb') as pf:\n dataset, prog, other_tparams = pickle.load(pf)\n os.remove(pipe_filename)\n \n \n self.dataset = dataset\n self.prog = prog\n self.module = None\n self.results = {'output': []}\n self.qseq = dataset['qseq']\n \n self.setUp()\n \n from frexp.util import StopWatch, user_time\n from time import process_time, perf_counter\n timer_user = StopWatch(user_time)\n timer_cpu = StopWatch(process_time)\n timer_wall = StopWatch(perf_counter)\n \n with timer_user, timer_cpu, timer_wall:\n self.run()\n \n self.tearDown()\n \n self.results = canonize(self.results)\n \n \n with open(pipe_filename, 'wb') as pf:\n pickle.dump(self.results, pf)\n \n def setUp(self):\n # Import driven program.\n dirname, filename = os.path.split(self.prog)\n if dirname:\n sys.path.append(dirname)\n try:\n self.module = importlib.import_module(\n 'experiments.django.' + filename)\n finally:\n if dirname:\n sys.path.pop()\n \n \n m = self.module\n ds = self.dataset\n \n # Populate dataset.\n self.group = m.make_group(True)\n self.users = []\n for i in range(ds['dsparams']['n_users']):\n u = m.make_user('u' + str(i))\n self.users.append(u)\n m.add_group(u, self.group)\n self.perms = []\n for i in range(ds['dsparams']['n_perms']):\n p = m.make_perm('p' + str(i))\n self.perms.append(p)\n m.do_query(self.users[0].id)\n \n # Preprocess operations.\n self.ops = []\n assert len(self.qseq) == len(self.perms)\n for i, ui in enumerate(self.qseq):\n self.ops.append((self.perms[i], self.users[ui].id))\n \n def run(self):\n g = self.group\n add_perm = self.module.add_perm\n do_query = self.module.do_query_nodemand\n \n for p, uid in self.ops:\n add_perm(g, p)\n output = do_query(uid)\n self.results['output'].append(deepcopy(output))\n \n def tearDown(self):\n pass\n\n\nclass DjangoWorkflow(ExpWorkflow):\n \n ExpDatagen = DjangoDatagen\n ExpExtractor = SmallExtractor\n \n ExpDriver = DjangoDriver\n \n # Verify out-of-date, needs to account for\n # simplified query implementations.\n# ExpVerifyDriver = DjangoVerifyDriver\n \n require_ac = False ###\n\n\nclass Scale(DjangoWorkflow):\n \n \"\"\"Vary the number of permissions and queries.\n Use Tom's original parameters from the OSQ paper.\n \"\"\"\n \n ###\n prefix = 'results/django_scale_small'\n \n class ExpDatagen(DjangoWorkflow.ExpDatagen):\n \n progs = [\n 'django_orig',\n 'django_dem',\n 'django_osq',\n ]\n \n n_users_list = [100, 200, 300]\n batch_n_users = 100\n points = list(range(50, 500 + 1, 50))\n \n def get_dsparams_list(self):\n return [\n dict(\n dsid = str(n_users) + '_' + str(n_perms),\n x = n_perms,\n \n n_users = n_users,\n n_q_users = n_users,\n n_perms = n_perms,\n )\n for n_users in self.n_users_list\n for n_perms in self.points\n ]\n \n def get_tparams_list(self, dsparams_list):\n # Exclude all but one of the n_users possibilities\n # from running with Batch.\n return [\n dict(\n tid = dsp['dsid'],\n dsid = dsp['dsid'],\n prog = prog,\n )\n for prog in self.progs\n for dsp in dsparams_list\n if not (prog == 'django_orig' and\n dsp['n_users'] != self.batch_n_users)\n ]\n \n stddev_window = .1\n min_repeats = 50\n max_repeats = 50\n \n class ExpExtractor(DjangoWorkflow.ExpExtractor, MetricExtractor):\n \n # Post-processed below.\n _series = [\n ('django_orig', 'original',\n 'red', '- s poly2'),\n 
('django_osq', 'OSQ',\n 'orange', '? _^ poly1'),\n ('django_dem', 'filtered',\n 'green', '? ^ poly1'),\n ]\n \n @property\n def rcparams(self):\n return dict(super().rcparams,\n **{'legend.handletextpad': .4,\n 'legend.borderaxespad': .2\n })\n \n linestyles = ['-', '--', ':']\n # Keep this in sync with the attribute of same name\n # in the Datagen.\n n_users_list = [100, 200, 300]\n \n @property\n def series(self):\n lss = self.linestyles\n new_series = []\n for sid, name, color, style in self._series:\n for i, n_users in enumerate(self.n_users_list):\n # If line style is '?', replace it with the\n # one associated with this number of users.\n ls, *rest = style.split()\n if ls == '?':\n ls = lss[i]\n new_style = ' '.join([ls] + rest)\n else:\n new_style = style\n new_name = name + ' (' + str(n_users) + ' users)'\n new_series.append(((sid, n_users), new_name,\n color, new_style))\n return new_series\n \n ylabel = 'Running time (in seconds)'\n xlabel = 'Number of permissions'\n \n metric = 'time_cpu'\n \n def get_series_data(self, datapoints, sid):\n inner_sid, n_users = sid\n datapoints = super().get_series_data(datapoints, inner_sid)\n datapoints = [p for p in datapoints\n if p['dsparams']['n_users'] == n_users]\n return datapoints\n \n xmin = 25\n xmax = 525\n ymin = 0\n ymax = 1.1\n\n\nclass Demand(DjangoWorkflow):\n \n \"\"\"Vary the number of users that are demanded.\"\"\"\n \n prefix = 'results/django_demand'\n \n class ExpDatagen(DjangoWorkflow.ExpDatagen):\n \n progs = [\n 'django_inc',\n 'django_dem',\n 'django_osq',\n 'django_simp_inc',\n 'django_simp_dem',\n 'django_simp_osq',\n ]\n \n def get_dsparams_list(self):\n return [\n dict(\n dsid = str(x),\n x = x,\n \n n_users = 300,\n n_q_users = x,\n n_perms = 500,\n )\n for x in [1, 10] + list(range(20, 300 + 1, 20))\n ]\n \n stddev_window = .1\n min_repeats = 50\n max_repeats = 50\n \n class ExpExtractor(DjangoWorkflow.ExpExtractor, MetricExtractor):\n \n series = [\n ('django_inc', 'incremental',\n 'blue', '- o normal'),\n ('django_osq', 'OSQ',\n 'orange', '-- ^ poly1'),\n ('django_dem', 'filtered',\n 'green', '- ^ poly1'),\n ('django_simp_inc', 'incremental (simplified)',\n 'blue', '- _o normal'),\n ('django_simp_osq', 'OSQ (simplified)',\n 'orange', '-- _^ normal'),\n ('django_simp_dem', 'filtered (simplified)',\n 'green', '- _^ normal'),\n ]\n \n xlabel = 'Number of demanded users'\n xmin = -10\n xmax = 310\n \n metric = 'time_cpu'\n\nclass DemandTime(Demand):\n \n class ExpExtractor(Demand.ExpExtractor):\n \n # Adjust geometry for external legend.\n width = 5.25\n figsize = (width, 2.625)\n tightlayout_bbox = (0, 0, 3.5/width, 1)\n legend_bbox = (1, 0, 1, 1)\n legend_loc = 'center left'\n \n ylabel = 'Running time (in seconds)'\n\nclass DemandTimeNorm(Demand):\n \n class ExpExtractor(Demand.ExpExtractor, NormalizedExtractor):\n \n base_sid_map = {\n 'django_dem': 'django_inc',\n 'django_osq': 'django_inc',\n 'django_simp_dem': 'django_simp_inc',\n 'django_simp_osq': 'django_simp_inc',\n }\n \n def normalize(self, pre_y, base_y):\n return pre_y / base_y\n \n legend_loc = 'lower right'\n \n ylabel = 'Running time (normalized)'\n \n y_ticklocs = [0, .5, 1, 1.5]\n" }, { "alpha_fraction": 0.48701298236846924, "alphanum_fraction": 0.4882698059082031, "avg_line_length": 33.84671401977539, "blob_id": "65deb153680e7545b1a1baa71885f9092dd389e9", "content_id": "8a5ac050133b5e5e6de9270e74b60f664dd449a9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4774, "license_type": 
"no_license", "max_line_length": 74, "num_lines": 137, "path": "/incoq/tests/invinc/obj/test_objcomp.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Unit tests for objcomp.py.\"\"\"\n\n\nimport unittest\n\nfrom incoq.util.collections import OrderedSet\nfrom incoq.compiler.comp import CompSpec\nimport incoq.compiler.incast as L\n\nfrom incoq.compiler.obj.objclause import ObjClauseFactory_Mixin as CF\nfrom incoq.compiler.obj.objcomp import *\nfrom incoq.compiler.obj.objcomp import (\n RetrievalReplacer, RetrievalExpander, flatten_retrievals,\n unflatten_retrievals, flatten_sets, unflatten_sets)\n\n\nclass ObjcompCase(unittest.TestCase):\n \n def test_retrieval_replacer(self):\n field_namer = lambda lhs, rhs: 'f_' + lhs + '_' + rhs\n map_namer = lambda lhs, rhs: 'm_' + lhs + '_' + rhs\n \n tree = L.pe('a.b[c.d].e + a[b[c]]')\n replacer = RetrievalReplacer(field_namer, map_namer)\n tree = replacer.process(tree)\n field_repls = replacer.field_repls\n map_repls = replacer.map_repls\n \n exp_tree = L.pe('f_m_f_a_b_f_c_d_e + m_a_m_b_c')\n exp_field_repls = [\n ('a', 'b', 'f_a_b'),\n ('c', 'd', 'f_c_d'),\n ('m_f_a_b_f_c_d', 'e', 'f_m_f_a_b_f_c_d_e'),\n ]\n exp_map_repls = [\n ('f_a_b', 'f_c_d', 'm_f_a_b_f_c_d'),\n ('b', 'c', 'm_b_c'),\n ('a', 'm_b_c', 'm_a_m_b_c'),\n ]\n \n self.assertEqual(tree, exp_tree)\n self.assertSequenceEqual(field_repls, exp_field_repls)\n self.assertSequenceEqual(map_repls, exp_map_repls)\n \n def test_retrieval_expander(self):\n tree = L.pe('f_f_m_a_b_c_d + foo')\n field_exps = {'f_f_m_a_b_c_d': ('f_m_a_b_c', 'd'),\n 'f_m_a_b_c': ('m_a_b', 'c')}\n map_exps = {'m_a_b': ('a', 'b')}\n tree = RetrievalExpander.run(tree, field_exps, map_exps)\n \n exp_tree = L.pe('a[b].c.d + foo')\n \n self.assertEqual(tree, exp_tree)\n \n def test_flatten_retrievals(self):\n comp = L.pe('COMP({x.a for x in S.b[c] if x.a > 5}, [S])')\n \n comp, seen_fields, seen_map = flatten_retrievals(comp)\n \n exp_comp = L.pe('''\n COMP({x_a for (S, S_b) in _F_b for (S_b, c, m_S_b_k_c) in _MAP\n for x in m_S_b_k_c for (x, x_a) in _F_a\n if x_a > 5}, [S])\n ''')\n exp_seen_fields = ['b', 'a']\n exp_seen_map = True\n \n self.assertEqual(comp, exp_comp)\n self.assertEqual(seen_fields, exp_seen_fields)\n self.assertEqual(seen_map, exp_seen_map)\n \n def test_unflatten_retrievals(self):\n comp = L.pe('''\n COMP({x_a for (S, S_b) in _F_b for (S_b, c, m_S_b_k_c) in _MAP\n for x in m_S_b_k_c for (x, x_a) in _F_a\n if x_a > 5}, [S])\n ''')\n comp = unflatten_retrievals(comp)\n \n exp_comp = L.pe('COMP({x.a for x in S.b[c] if x.a > 5}, [S])')\n \n self.assertEqual(comp, exp_comp) \n \n def test_flatten_unflatten_sets(self):\n comp = L.pe(\n 'COMP({x for (o, o_s) in _F_s for (o, o_t) in _F_t '\n 'for x in o_s if x in o_t if x in T}, [S, T])')\n \n flatcomp, use_mset = flatten_sets(comp, ['T'])\n \n exp_flatcomp = L.pe(\n 'COMP({x for (o, o_s) in _F_s for (o, o_t) in _F_t '\n 'for (o_s, x) in _M if (o_t, x) in _M if x in T}, '\n '[S, T])')\n \n self.assertEqual(flatcomp, exp_flatcomp)\n self.assertTrue(use_mset)\n \n unflatcomp = unflatten_sets(flatcomp)\n self.assertEqual(unflatcomp, comp)\n \n def test_pattern_unflatten(self):\n # Test that patternizing/depatternizing interacts well with\n # object clauses.\n \n comp = L.pe(\n 'COMP({x for x in S if x in T for y in S '\n 'for y2 in x if y == y2}, '\n '[S, T], {})')\n comp, _use_mset, _fields, _use_map = flatten_comp(comp, [])\n \n spec = CompSpec.from_comp(comp, CF)\n spec = spec.to_pattern()\n spec = spec.to_nonpattern()\n comp = 
spec.to_comp({})\n comp = unflatten_comp(comp)\n \n exp_comp = L.pe(\n 'COMP({x for x in S if x in T for y in S if y in x}, '\n '[S, T], {})')\n \n self.assertEqual(comp, exp_comp)\n \n def test_unflatten_subclause(self):\n # Make sure we don't do anything foolish when presented with\n # a subtractive enum.\n comp = L.pe(\n 'COMP({z for (x, y) in _M for (y, z) in _M - {e}}, [])')\n comp = unflatten_comp(comp)\n exp_comp = L.pe(\n 'COMP({z for y in x for (y, z) in _M - {e}}, [])')\n self.assertEqual(comp, exp_comp)\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.5764554142951965, "alphanum_fraction": 0.5779753923416138, "avg_line_length": 27.982378005981445, "blob_id": "060c7698638d005ec6d69d7d0bfca530ff58b729", "content_id": "ee6658b13d294322ab0327abd84555695b2c04bf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13158, "license_type": "no_license", "max_line_length": 76, "num_lines": 454, "path": "/incoq/compiler/incast/types.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Type analysis.\"\"\"\n\n\n__all__ = [\n 'FrozenDictField',\n \n 'Type',\n 'toptype',\n 'bottomtype',\n 'PrimitiveType',\n 'booltype',\n 'numbertype',\n 'strtype',\n 'SeqType',\n 'TupleType',\n 'ListType',\n 'SetType',\n 'DictType',\n 'ObjType',\n 'RestrictiveType',\n 'RefineType',\n 'EnumType',\n 'TypeVar',\n \n 'eval_typestr',\n 'parse_typestr',\n \n 'TypedUnparser',\n 'unparse_structast_typed',\n]\n\n\nfrom numbers import Number\nfrom simplestruct import Struct, Field, TypedField\n\nfrom incoq.util.collections import frozendict\n\nfrom .nodes import (AST, expr, Not, Index, Num, Enumerator, Load, Store,\n Eq, NotEq, Lt, LtE, Gt, GtE, Is, IsNot, In, NotIn)\nfrom .structconv import NodeTransformer, AdvNodeVisitor, Unparser\nfrom .util import NameGenerator\n\n\nclass FrozenDictField(TypedField):\n \n \"\"\"Field for frozendicts, with coercion from ordinary dicts.\"\"\"\n \n def __init__(self, keytype=None, valuetype=None):\n super().__init__(frozendict)\n self.keytype = keytype\n self.valuetype = valuetype\n \n def copy(self):\n return type(self)(self.keytype, self.valuetype)\n \n def check(self, inst, value):\n if (self.keytype is not None and\n not all(isinstance(k, self.keytype) for k in value.keys())):\n raise TypeError('Key with bad type')\n if (self.valuetype is not None and\n not all(isinstance(v, self.valuetype) for v in value.values())):\n raise TypeError('Value with bad type')\n \n def normalize(self, inst, value):\n if not isinstance(value, frozendict):\n return frozendict(value)\n return value\n\n\nclass Type(Struct):\n \n def issubtype(self, other):\n \"\"\"Return true if this type is a (non-strict) subtype of\n the other.\n \"\"\"\n if self is bottomtype or other is toptype:\n return True\n elif self == other:\n return True\n else:\n return self.issubtype_helper(other)\n \n def issubtype_helper(self, other):\n return False\n \n def matches(self, other):\n \"\"\"True if this type and other have the same functor symbol\n and arity.\n \"\"\"\n if type(self) == type(other):\n return self.matches_helper(other)\n else:\n return False\n \n def matches_helper(self, other):\n return True\n \n def match_against(self, other):\n \"\"\"Return constraints for self <= other to hold, when\n self matches other. 
Return value is a list of constraints\n lhs <= rhs each represented as a pair (lhs, rhs).\n \"\"\"\n assert self.matches(other)\n return self.match_against_helper(other)\n \n def match_against_helper(self, other):\n return []\n \n def join(self, *others, inverted=False):\n \"\"\"Take the join (in the lattice sense) with one or more\n other types, returning a new type.\n \n If inverted is True, take the meet instead.\n \"\"\"\n t = self\n for o in others:\n t = t.join_one(o, inverted=inverted)\n return t\n \n def join_one(self, other, *, inverted=False):\n if self.issubtype(other):\n return other if not inverted else self\n elif other.issubtype(self):\n return self if not inverted else other\n elif self == other:\n return self\n else:\n return self.join_helper(other, inverted=inverted)\n \n def join_helper(self, other, *, inverted=False):\n \"\"\"Join with one other type of the same class as this one.\n If inverted use meet instead.\n \"\"\"\n return toptype if not inverted else bottomtype\n \n def meet(self, *others, inverted=False):\n \"\"\"Take the lattice meet with one or more other types,\n returning a new type.\"\"\"\n return self.join(*others, inverted=not inverted)\n \n def expand(self, store):\n \"\"\"Expand a type expression, given a typevar store\n (a mapping from typevar names to ground type expressions).\n \"\"\"\n return self\n \n def widen(self, limit):\n \"\"\"Return a widened type that replaces nested types with\n top if they are at least limit levels deep.\n \"\"\"\n if limit == 0:\n return toptype\n else:\n return self.widen_helper(limit)\n \n def widen_helper(self, limit):\n return self\n\nclass TopTypeClass(Type):\n \"\"\"No type info.\"\"\"\n \n singleton = None\n \n def __new__(cls):\n if cls.singleton is None:\n cls.singleton = super().__new__(cls)\n return cls.singleton\n \n def __str__(self):\n return 'Top'\ntoptype = TopTypeClass()\n\nclass BottomTypeClass(Type):\n \"\"\"Conflicting type info.\"\"\"\n \n singleton = None\n \n def __new__(cls):\n if cls.singleton is None:\n cls.singleton = super().__new__(cls)\n return cls.singleton\n \n def __str__(self):\n return 'Bottom'\nbottomtype = BottomTypeClass()\n\nclass PrimitiveType(Type):\n t = TypedField(type)\n \n def __str__(self):\n return self.t.__name__\nbooltype = PrimitiveType(bool)\nnumbertype = PrimitiveType(Number)\nstrtype = PrimitiveType(str)\n\nclass TupleType(Type):\n ets = TypedField(Type, seq=True)\n \n def __str__(self):\n return '(' + ', '.join(str(et) for et in self.ets) + ')'\n \n def issubtype_helper(self, other):\n if type(self) != type(other):\n return False\n if len(self.ets) != len(other.ets):\n return False\n return all(et1.issubtype(et2)\n for et1, et2 in zip(self.ets, other.ets))\n \n def matches_helper(self, other):\n return len(self.ets) == len(other.ets)\n \n def match_against_helper(self, other):\n return [(et1, et2) for et1, et2 in zip(self.ets, other.ets)]\n \n def join_helper(self, other, *, inverted=False):\n top = toptype if not inverted else bottomtype\n if type(self) != type(other):\n return top\n if len(self.ets) != len(other.ets):\n return top\n new_ets = [et1.join(et2, inverted=inverted)\n for et1, et2 in zip(self.ets, other.ets)]\n return self._replace(ets=new_ets)\n \n def expand(self, store):\n new_ets = [et.expand(store) for et in self.ets]\n return self._replace(ets=new_ets)\n \n def widen_helper(self, limit):\n new_ets = [et.widen(limit - 1) for et in self.ets]\n return self._replace(ets=new_ets)\n\nclass SeqType(Type):\n et = TypedField(Type)\n brackets = '??'\n \n def 
__str__(self):\n return self.brackets[0] + str(self.et) + self.brackets[1]\n \n def issubtype_helper(self, other):\n if type(self) != type(other):\n return False\n return self.et.issubtype(other.et)\n \n def join_helper(self, other, *, inverted=False):\n if type(self) != type(other):\n return toptype if not inverted else bottomtype\n new_et = self.et.join(other.et, inverted=inverted)\n return self._replace(et=new_et)\n \n def match_against_helper(self, other):\n return [(self.et, other.et)]\n \n def expand(self, store):\n new_et = self.et.expand(store)\n return self._replace(et=new_et)\n \n def widen_helper(self, limit):\n new_et = self.et.widen(limit - 1)\n return self._replace(et=new_et)\n\nclass ListType(SeqType):\n _inherit_fields = True\n brackets = '[]'\n\nclass SetType(SeqType):\n _inherit_fields = True\n brackets = '{}'\n\nclass DictType(Type):\n kt = TypedField(Type)\n vt = TypedField(Type)\n \n # Contravariant key types were also considered as a possibility.\n # This would affect each of the helpers below.\n \n def __str__(self): \n return '{' + str(self.kt) + ': ' + str(self.vt) + '}'\n \n def issubtype_helper(self, other):\n if type(self) != type(other):\n return False\n return (self.kt.issubtype(other.kt) and\n self.vt.issubtype(other.vt))\n \n def join_helper(self, other, *, inverted=False):\n if type(self) != type(other):\n return toptype if not inverted else bottomtype\n new_kt = self.kt.join(other.kt, inverted=inverted)\n new_vt = self.vt.join(other.vt, inverted=inverted)\n return self._replace(kt=new_kt, vt=new_vt)\n \n def match_against_helper(self, other):\n return [(self.kt, other.kt), (self.vt, other.vt)]\n \n def expand(self, store):\n new_kt = self.kt.expand(store)\n new_vt = self.vt.expand(store)\n return self._replace(kt=new_kt, vt=new_vt)\n \n def widen_helper(self, limit):\n new_kt = self.kt.widen(limit - 1)\n new_vt = self.vt.widen(limit - 1)\n return self._replace(kt=new_kt, vt=new_vt)\n\nclass ObjType(Type):\n name = TypedField(str)\n \n def __str__(self):\n return self.name\n \n def matches_helper(self, other):\n return False\n\nclass RestrictiveType(Type):\n name = TypedField(str)\n base = TypedField(Type)\n \n def __str__(self):\n return self.name\n \n def issubtype_helper(self, other):\n return self.base.issubtype(other)\n \n def matches_helper(self, other):\n return False\n \n def join_helper(self, other, *, inverted=False):\n # My join with any type that's not directly comparable\n # is the same as my base's join with that type, since\n # my base is my only direct ancestor.\n #\n # My meet with any type that's not directly comparable\n # is bottom, since the only possible non-bottom subtypes\n # are other refinements, which would have no ancestors\n # that are incomparable to me.\n if not inverted:\n return self.base.join(other, inverted=inverted)\n else:\n return bottomtype\n \n def expand(self, store):\n new_base = self.base.expand(store)\n return self._replace(base=new_base)\n \n def widen_helper(self, limit):\n # Rather than lose information in the base, it's probably\n # better to just replace ourselves with the base.\n widened_base = self.base.widen(limit - 1)\n if self.base != widened_base:\n new_type = self.base.widen(limit)\n else:\n new_type = self\n return new_type\n\nclass RefineType(RestrictiveType):\n _inherit_fields = True\n\nclass EnumType(RestrictiveType):\n _inherit_fields = True\n \n\nclass TypeVar(Type):\n name = TypedField(str)\n \n def __str__(self):\n return '<' + self.name + '>'\n \n def illegalop(self, other):\n raise 
NotImplementedError('Illegal operation for non-ground '\n 'type expression')\n \n issubtype = illegalop\n matches = illegalop\n match_against = illegalop\n join = illegalop\n \n def expand(self, store):\n t = store[self.name]\n assert isinstance(t, Type)\n return t\n\n\ndef eval_typestr(s):\n \"\"\"eval() a string representing a type expression.\"\"\"\n ns = {k: v for k, v in globals().items()\n if isinstance(v, Type) or\n (isinstance(v, type) and issubclass(v, Type))}\n return eval(s, ns)\n\ndef parse_typestr(s):\n \"\"\"Parse a string representing a type expression.\"\"\"\n ns = {\n 'top': toptype,\n 'bottom': bottomtype,\n 'bool': booltype,\n 'number': numbertype,\n 'str': strtype,\n 'tuple': TupleType,\n 'list': ListType,\n 'set': SetType,\n 'dict': DictType,\n 'obj': ObjType,\n 'subtype': RefineType,\n 'enum': EnumType,\n }\n return eval(s, ns)\n\n\nclass TypedUnparser(Unparser):\n \n \"\"\"Unparser that includes type information for all expression\n nodes.\n \"\"\"\n \n def dispatch(self, tree):\n show_type = isinstance(tree, expr) and tree.type is not None\n \n if show_type:\n self.write('(')\n super().dispatch(tree)\n if show_type:\n self.write(' : ' + str(tree.type) + ')')\n\ndef unparse_structast_typed(tree):\n \"\"\"As structconv.unparse_structast(), but with type info.\"\"\"\n return TypedUnparser.to_source(tree)\n\n\ndef add_fresh_typevars(tree):\n \"\"\"Add fresh typevars to all expression nodes.\"\"\"\n namegen = NameGenerator('_T{}')\n tvars = set()\n class Trans(NodeTransformer):\n def visit(self, node):\n node = super().visit(node)\n if isinstance(node, expr):\n tname = next(namegen)\n node = node._replace(type=TypeVar(tname))\n tvars.add(tname)\n return node\n return Trans.run(tree), tvars\n\ndef subst_typevars(tree, store):\n \"\"\"Substitute typevars into all expression nodes.\"\"\"\n class Trans(NodeTransformer):\n def visit(self, node):\n node = super().visit(node)\n if isinstance(node, expr):\n assert isinstance(node.type, TypeVar)\n node = node._replace(type=store[node.type.name])\n return node\n return Trans.run(tree)\n" }, { "alpha_fraction": 0.6910569071769714, "alphanum_fraction": 0.6910569071769714, "avg_line_length": 16.571428298950195, "blob_id": "098c4e5ef52c0c8584ff9ef4db13c6f6b8823893", "content_id": "b897607912fab210f7e3e0cceca75e54c9f2597c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 123, "license_type": "no_license", "max_line_length": 43, "num_lines": 7, "path": "/incoq/compiler/set/__init__.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Simple set- and map-based operations.\"\"\"\n\n\n# Exports.\nfrom .mask import *\nfrom .setmatch import *\nfrom .auxmap import *\n" }, { "alpha_fraction": 0.4747706353664398, "alphanum_fraction": 0.5206422209739685, "avg_line_length": 17.16666603088379, "blob_id": "742d8d7fd642e8558ac05e8182ee7d12a5de3b1f", "content_id": "6ece04af00dcf47d470b13e81459b55c40a7e78f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 436, "license_type": "no_license", "max_line_length": 52, "num_lines": 24, "path": "/incoq/tests/programs/aggr/uset_in.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "# Aggregate of a comprehension with a U-set.\n\nfrom incoq.runtime import *\n\nOPTIONS(\n default_impl = 'inc',\n)\n\nQUERYOPTIONS(\n '{y for (x2, y) in E if x == x2 if y < L}',\n uset_mode = 'explicit',\n uset_params = ['L'],\n)\n\nE = Set()\n\nfor e in [(1, 2), (1, 3), (1, 15), 
(2, 4)]:\n E.add(e)\n\nL = 10\nx = 1\nprint(sum({y for (x2, y) in E if x == x2 if y < L}))\nE.remove((1, 3))\nprint(sum({y for (x2, y) in E if x == x2 if y < L}))\n" }, { "alpha_fraction": 0.39885222911834717, "alphanum_fraction": 0.40889525413513184, "avg_line_length": 28.02083396911621, "blob_id": "7cf6a58628e2e64f14ad7b35a99303c952a509c8", "content_id": "730c73253b03df5d7e6c2b0b516803a7d4ca7990", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1394, "license_type": "no_license", "max_line_length": 74, "num_lines": 48, "path": "/incoq/tests/invinc/tup/test_tupletrans.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Unit tests for tupletrans.py.\"\"\"\n\n\nimport unittest\n\nimport incoq.compiler.incast as L\nfrom incoq.compiler.tup.tupletrans import *\n\n\nclass TupletransCase(unittest.TestCase):\n \n def test_flatten_tuples_comp(self):\n comp = L.pe('COMP({(a, c) for (a, (b, c)) in R if f((a, (b, c))) '\n 'for ((a, b), (a, b, c)) in S}, '\n '[], {})')\n comp, trels = flatten_tuples_comp(comp)\n \n exp_comp = L.pe('''\n COMP({(a, c) for (a, _tup1) in R\n for (_tup1, b, c) in _TUP2\n if f((a, (b, c)))\n for (_tup2, _tup3) in S\n for (_tup2, a, b) in _TUP2\n for (_tup3, a, b, c) in _TUP3},\n [], {})\n ''')\n exp_trels = ['_TUP2', '_TUP3']\n \n self.assertEqual(comp, exp_comp)\n self.assertSequenceEqual(trels, exp_trels)\n \n def test_flatten_tuples(self):\n tree = L.p('''\n print(COMP({(a, c) for (a, (b, c)) in R}, [], {}))\n ''')\n tree = flatten_tuples(tree)\n \n exp_tree = L.p('''\n print(COMP({(a, c) for (a, _tup1) in R\n for (_tup1, b, c) in _TUP2},\n [], {}))\n ''')\n \n self.assertEqual(tree, exp_tree)\n\n\nif __name__ == '__main__':\n unittest.main()\n\n" }, { "alpha_fraction": 0.41584157943725586, "alphanum_fraction": 0.4895489513874054, "avg_line_length": 36.11224365234375, "blob_id": "722ae43265665af93f374474f3805c5c7bef4928", "content_id": "2d79bf20f7e6e64523572dd384135a9953c5b224", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3636, "license_type": "no_license", "max_line_length": 102, "num_lines": 98, "path": "/incoq/tests/programs/comp/nested/basic_out.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from incoq.runtime import *\n# Comp1 := {(a, c) : (a, b) in E, (b, c) in E}\n# Comp6 := {(x, z) : (x, y) in E, (y, z) in Comp1}\n_m_E_out = Map()\ndef _maint__m_E_out_add(_e):\n (v11_1, v11_2) = _e\n if (v11_1 not in _m_E_out):\n _m_E_out[v11_1] = set()\n _m_E_out[v11_1].add(v11_2)\n\n_m_E_in = Map()\ndef _maint__m_E_in_add(_e):\n (v9_1, v9_2) = _e\n if (v9_2 not in _m_E_in):\n _m_E_in[v9_2] = set()\n _m_E_in[v9_2].add(v9_1)\n\n_m_Comp1_out = Map()\ndef _maint__m_Comp1_out_add(_e):\n (v7_1, v7_2) = _e\n if (v7_1 not in _m_Comp1_out):\n _m_Comp1_out[v7_1] = set()\n _m_Comp1_out[v7_1].add(v7_2)\n\ndef _maint__m_Comp1_out_remove(_e):\n (v8_1, v8_2) = _e\n _m_Comp1_out[v8_1].remove(v8_2)\n if (len(_m_Comp1_out[v8_1]) == 0):\n del _m_Comp1_out[v8_1]\n\nComp6 = RCSet()\ndef _maint_Comp6_E_add(_e):\n # Iterate {(v3_x, v3_y, v3_z) : (v3_x, v3_y) in deltamatch(E, 'bb', _e, 1), (v3_y, v3_z) in Comp1}\n (v3_x, v3_y) = _e\n for v3_z in (_m_Comp1_out[v3_y] if (v3_y in _m_Comp1_out) else set()):\n if ((v3_x, v3_z) not in Comp6):\n Comp6.add((v3_x, v3_z))\n else:\n Comp6.incref((v3_x, v3_z))\n\ndef _maint_Comp6_Comp1_add(_e):\n # Iterate {(v5_x, v5_y, v5_z) : (v5_x, v5_y) in E, (v5_y, v5_z) in deltamatch(Comp1, 'bb', _e, 1)}\n 
(v5_y, v5_z) = _e\n for v5_x in (_m_E_in[v5_y] if (v5_y in _m_E_in) else set()):\n if ((v5_x, v5_z) not in Comp6):\n Comp6.add((v5_x, v5_z))\n else:\n Comp6.incref((v5_x, v5_z))\n\ndef _maint_Comp6_Comp1_remove(_e):\n # Iterate {(v6_x, v6_y, v6_z) : (v6_x, v6_y) in E, (v6_y, v6_z) in deltamatch(Comp1, 'bb', _e, 1)}\n (v6_y, v6_z) = _e\n for v6_x in (_m_E_in[v6_y] if (v6_y in _m_E_in) else set()):\n if (Comp6.getref((v6_x, v6_z)) == 1):\n Comp6.remove((v6_x, v6_z))\n else:\n Comp6.decref((v6_x, v6_z))\n\nComp1 = RCSet()\ndef _maint_Comp1_E_add(_e):\n v1_DAS = set()\n # Iterate {(v1_a, v1_b, v1_c) : (v1_a, v1_b) in deltamatch(E, 'bb', _e, 1), (v1_b, v1_c) in E}\n (v1_a, v1_b) = _e\n for v1_c in (_m_E_out[v1_b] if (v1_b in _m_E_out) else set()):\n if ((v1_a, v1_b, v1_c) not in v1_DAS):\n v1_DAS.add((v1_a, v1_b, v1_c))\n # Iterate {(v1_a, v1_b, v1_c) : (v1_a, v1_b) in E, (v1_b, v1_c) in deltamatch(E, 'bb', _e, 1)}\n (v1_b, v1_c) = _e\n for v1_a in (_m_E_in[v1_b] if (v1_b in _m_E_in) else set()):\n if ((v1_a, v1_b, v1_c) not in v1_DAS):\n v1_DAS.add((v1_a, v1_b, v1_c))\n for (v1_a, v1_b, v1_c) in v1_DAS:\n if ((v1_a, v1_c) not in Comp1):\n Comp1.add((v1_a, v1_c))\n # Begin maint _m_Comp1_out after \"Comp1.add((v1_a, v1_c))\"\n _maint__m_Comp1_out_add((v1_a, v1_c))\n # End maint _m_Comp1_out after \"Comp1.add((v1_a, v1_c))\"\n # Begin maint Comp6 after \"Comp1.add((v1_a, v1_c))\"\n _maint_Comp6_Comp1_add((v1_a, v1_c))\n # End maint Comp6 after \"Comp1.add((v1_a, v1_c))\"\n else:\n Comp1.incref((v1_a, v1_c))\n del v1_DAS\n\nfor (v1, v2) in [(1, 2), (2, 3), (3, 4), (4, 5)]:\n # Begin maint _m_E_out after \"E.add((v1, v2))\"\n _maint__m_E_out_add((v1, v2))\n # End maint _m_E_out after \"E.add((v1, v2))\"\n # Begin maint _m_E_in after \"E.add((v1, v2))\"\n _maint__m_E_in_add((v1, v2))\n # End maint _m_E_in after \"E.add((v1, v2))\"\n # Begin maint Comp6 after \"E.add((v1, v2))\"\n _maint_Comp6_E_add((v1, v2))\n # End maint Comp6 after \"E.add((v1, v2))\"\n # Begin maint Comp1 after \"E.add((v1, v2))\"\n _maint_Comp1_E_add((v1, v2))\n # End maint Comp1 after \"E.add((v1, v2))\"\nprint(sorted(Comp6))" }, { "alpha_fraction": 0.4043126702308655, "alphanum_fraction": 0.48194068670272827, "avg_line_length": 35.39215850830078, "blob_id": "9b373595bf9268b13861e723aff847a121577ec1", "content_id": "163c62cd188497c54f193700bcc9a987b9b4dd55", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1855, "license_type": "no_license", "max_line_length": 105, "num_lines": 51, "path": "/incoq/tests/programs/comp/deltaeq_out.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from incoq.runtime import *\n# Comp1 := {(x, w) : (x, x, z) in S, (z, w) in T}\n_m_S_u1b = Map()\ndef _maint__m_S_u1b_add(_e):\n (v7_1, v7_2, v7_3) = _e\n if ((v7_1 == v7_2)):\n if (v7_3 not in _m_S_u1b):\n _m_S_u1b[v7_3] = set()\n _m_S_u1b[v7_3].add(v7_1)\n\n_m_T_out = Map()\ndef _maint__m_T_out_add(_e):\n (v5_1, v5_2) = _e\n if (v5_1 not in _m_T_out):\n _m_T_out[v5_1] = set()\n _m_T_out[v5_1].add(v5_2)\n\nComp1 = RCSet()\ndef _maint_Comp1_S_add(_e):\n # Iterate {(v1_x, v1_z, v1_w) : (v1_x, v1_x, v1_z) in deltamatch(S, 'b1b', _e, 1), (v1_z, v1_w) in T}\n for (v1_x, v1_z) in setmatch({_e}, 'u1u', ()):\n for v1_w in (_m_T_out[v1_z] if (v1_z in _m_T_out) else set()):\n if ((v1_x, v1_w) not in Comp1):\n Comp1.add((v1_x, v1_w))\n else:\n Comp1.incref((v1_x, v1_w))\n\ndef _maint_Comp1_T_add(_e):\n # Iterate {(v3_x, v3_z, v3_w) : (v3_x, v3_x, v3_z) in S, (v3_z, v3_w) in deltamatch(T, 
'bb', _e, 1)}\n (v3_z, v3_w) = _e\n for v3_x in (_m_S_u1b[v3_z] if (v3_z in _m_S_u1b) else set()):\n if ((v3_x, v3_w) not in Comp1):\n Comp1.add((v3_x, v3_w))\n else:\n Comp1.incref((v3_x, v3_w))\n\nfor (v1, v2) in [(2, 4), (3, 5)]:\n # Begin maint _m_T_out after \"T.add((v1, v2))\"\n _maint__m_T_out_add((v1, v2))\n # End maint _m_T_out after \"T.add((v1, v2))\"\n # Begin maint Comp1 after \"T.add((v1, v2))\"\n _maint_Comp1_T_add((v1, v2))\n # End maint Comp1 after \"T.add((v1, v2))\"\nfor (v1, v2, v3) in [(1, 1, 2), (1, 2, 3)]:\n # Begin maint _m_S_u1b after \"S.add((v1, v2, v3))\"\n _maint__m_S_u1b_add((v1, v2, v3))\n # End maint _m_S_u1b after \"S.add((v1, v2, v3))\"\n # Begin maint Comp1 after \"S.add((v1, v2, v3))\"\n _maint_Comp1_S_add((v1, v2, v3))\n # End maint Comp1 after \"S.add((v1, v2, v3))\"\nprint(sorted(Comp1))" }, { "alpha_fraction": 0.6226415038108826, "alphanum_fraction": 0.627081036567688, "avg_line_length": 25.5, "blob_id": "583ed7ccbb07972529ac61cbafd35ec99455912c", "content_id": "22a6c429ce0cefd586cadfa9b88edde471dbe86a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 901, "license_type": "no_license", "max_line_length": 70, "num_lines": 34, "path": "/experiments/jql/java/jqlexp/Course.java", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "// (C) Copyright Darren Willis, David James Pearce and James Noble 2005. \n// Permission to copy, use, modify, sell and distribute this software \n// is granted provided this copyright notice appears in all copies. \n// This software is provided \"as is\" without express or implied \n// warranty, and with no claim as to its suitability for any purpose.\n//\n// Email: darren.willis@mcs.vuw.ac.nz\n\npackage jqlexp;\n\nimport java.util.ArrayList;\nimport java.util.HashMap;\n\npublic class Course {\n @jql.core.Cachable\n public String name;\n \n public Course(String n) {\n name = n;\n }\n \n public String toString() {\n return \"Course(\" + name + \")\";\n }\n \n public ArrayList asTree() {\n ArrayList result = new ArrayList();\n result.add(\"Course\");\n HashMap<String, Object> attrs = new HashMap<String, Object>();\n attrs.put(\"name\", name);\n result.add(attrs);\n return result;\n }\n}\n" }, { "alpha_fraction": 0.392179399728775, "alphanum_fraction": 0.39256277680397034, "avg_line_length": 31.203702926635742, "blob_id": "f37c8ea7f6f4e71a4361703139fca067f3372290", "content_id": "ae68f0db9b2110830c0622ccc88c03387439066f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5217, "license_type": "no_license", "max_line_length": 73, "num_lines": 162, "path": "/incoq/tests/invinc/obj/test_match.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Unit tests for match.py.\"\"\"\n\n\nimport unittest\n\nimport incoq.compiler.incast as L\nfrom incoq.compiler.set import Mask\nfrom incoq.compiler.obj.match import *\n\n\nclass BindmatchCase(unittest.TestCase):\n \n def test_mset(self):\n code = mset_bindmatch(Mask.BB, ['x', 'y'], [], L.pc('pass'),\n typecheck=True)\n exp_code = L.pc('''\n if isinstance(x, Set):\n if (y in x):\n pass\n ''')\n self.assertEqual(code, exp_code)\n \n code = mset_bindmatch(Mask.BB, ['x', 'y'], [], L.pc('pass'),\n typecheck=False)\n exp_code = L.pc('''\n if (y in x):\n pass\n ''')\n self.assertEqual(code, exp_code)\n \n code = mset_bindmatch(Mask.OUT, ['x'], ['y'], L.pc('pass'),\n typecheck=False)\n exp_code = L.pc('''\n for y in x:\n pass\n ''')\n self.assertEqual(code, 
exp_code)\n \n code = mset_bindmatch(Mask.B1, ['x'], [], L.pc('pass'),\n typecheck=False)\n exp_code = L.pc('''\n if x in x:\n pass\n ''')\n self.assertEqual(code, exp_code)\n \n code = mset_bindmatch(Mask.BW, ['x'], [], L.pc('pass'),\n typecheck=False)\n exp_code = L.pc('''\n if not x.isempty():\n pass\n ''')\n self.assertEqual(code, exp_code)\n \n code = mset_bindmatch(Mask.IN, ['y'], ['x'], L.pc('pass'),\n typecheck=True)\n exp_code = L.pc('''\n for x in setmatch(_M, 'ub', y):\n pass\n ''')\n self.assertEqual(code, exp_code)\n \n with self.assertRaises(AssertionError):\n mset_bindmatch(Mask.UU, [], ['x', 'y'], L.pc('pass'),\n typecheck=True)\n \n def test_fset(self):\n code = fset_bindmatch('f', Mask.BB, ['x', 'y'], [], L.pc('pass'),\n typecheck=True)\n exp_code = L.pc('''\n if hasattr(x, 'f'):\n if x.f == y:\n pass\n ''')\n self.assertEqual(code, exp_code)\n \n code = fset_bindmatch('f', Mask.BB, ['x', 'y'], [], L.pc('pass'),\n typecheck=False)\n exp_code = L.pc('''\n if (x.f == y):\n pass\n ''')\n self.assertEqual(code, exp_code)\n \n code = fset_bindmatch('f', Mask.OUT, ['x'], ['y'], L.pc('pass'),\n typecheck=False)\n exp_code = L.pc('''\n y = x.f\n pass\n ''')\n self.assertEqual(code, exp_code)\n \n code = fset_bindmatch('f', Mask.B1, ['x'], [], L.pc('pass'),\n typecheck=False)\n exp_code = L.pc('''\n if x == x.f:\n pass\n ''')\n self.assertEqual(code, exp_code)\n \n code = fset_bindmatch('f', Mask.BW, ['x'], [], L.pc('pass'),\n typecheck=False)\n exp_code = L.pc('''\n if hasattr(x, 'f'):\n pass\n ''')\n self.assertEqual(code, exp_code)\n \n code = fset_bindmatch('f', Mask.IN, ['y'], ['x'], L.pc('pass'),\n typecheck=True)\n exp_code = L.pc('''\n for x in setmatch(_F_f, 'ub', y):\n pass\n ''')\n self.assertEqual(code, exp_code)\n \n with self.assertRaises(AssertionError):\n fset_bindmatch('f', Mask.UU, [], ['x', 'y'], L.pc('pass'),\n typecheck=True)\n \n def test_mapset(self):\n code = mapset_bindmatch(Mask('bbb'), ['x', 'y', 'z'], [],\n L.pc('pass'), typecheck=True)\n exp_code = L.pc('''\n if isinstance(x, Map):\n if y in x and x[y] == z:\n pass\n ''')\n self.assertEqual(code, exp_code)\n \n code = mapset_bindmatch(Mask('bbu'), ['x', 'y'], ['z'],\n L.pc('pass'), typecheck=False)\n exp_code = L.pc('''\n if y in x:\n z = x[y]\n pass\n ''')\n self.assertEqual(code, exp_code)\n \n code = mapset_bindmatch(Mask('buu'), ['x'], ['y', 'z'],\n L.pc('pass'), typecheck=False)\n exp_code = L.pc('''\n for y, z in x.items():\n pass\n ''')\n self.assertEqual(code, exp_code)\n \n with self.assertRaises(AssertionError):\n mapset_bindmatch(Mask('uuu'), [], ['x', 'y', 'z'],\n L.pc('pass'), typecheck=True)\n \n code = mapset_bindmatch(Mask('ubb'), ['y', 'z'], ['x'],\n L.pc('pass'), typecheck=True)\n exp_code = L.pc('''\n for x in setmatch(_MAP, 'ubb', (y, z)):\n pass\n ''')\n self.assertEqual(code, exp_code)\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.4166666567325592, "alphanum_fraction": 0.4947916567325592, "avg_line_length": 26.571428298950195, "blob_id": "e2141ee7e75099cde2baef623731add3389fc4c5", "content_id": "20e95e0e3afa68f87f76ebec85c9266f042bb98d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 192, "license_type": "no_license", "max_line_length": 57, "num_lines": 7, "path": "/incoq/tests/programs/comp/pattern_out.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from incoq.runtime import *\nE = Set()\nS = Set()\nfor (v1, v2) in {(1, 1), (1, 2), (1, 3), (2, 3), (3, 4)}:\n E.add((v1, 
v2))\nS.add(1)\nprint(sorted({x for (x, x) in E for x in S if (x in S)}))" }, { "alpha_fraction": 0.5511152148246765, "alphanum_fraction": 0.5622676610946655, "avg_line_length": 24.023256301879883, "blob_id": "6085b84837a99885c3c1862f5c89818362b8e7be", "content_id": "ccd9d8098f206443fc1de1bd3f0fb1d66f0a5e72", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1076, "license_type": "no_license", "max_line_length": 81, "num_lines": 43, "path": "/experiments/rbac/constrainedrbac/crbac_in.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "# Query that finds violations of ANSI RBAC's Static Separation of\n# Duties condition.\n\nfrom incoq.runtime import *\n\nUSERS = Set()\nUR = Set()\nSSDNC = Set()\nSSDNR = Set()\n\ndef add_user(user):\n USERS.add(user)\n\ndef remove_user(user):\n USERS.remove(user)\n\ndef add_ur(user, role):\n UR.add((user, role))\n\ndef remove_ur(user, role):\n UR.remove((user, role))\n\ndef add_ssdnc(name, c):\n SSDNC.add((name, c))\n\ndef remove_ssdnc(name, c):\n SSDNC.remove((name, c))\n\ndef add_ssdnr(name, role):\n SSDNR.add((name, role))\n\ndef remove_ssdnr(name, role):\n SSDNR.remove((name, role))\n\ndef do_query():\n return {(u, name) for u in USERS for (name, c) in SSDNC\n if count({r for (u2, r) in UR for (name2, r2) in SSDNR\n if u == u2 if name == name2 if r == r2}) >= c}\n\ndef do_query_nodemand():\n return NODEMAND({(u, name) for u in USERS for (name, c) in SSDNC\n if count({r for (u2, r) in UR for (name2, r2) in SSDNR\n if u == u2 if name == name2 if r == r2}) >= c})\n" }, { "alpha_fraction": 0.5541232228279114, "alphanum_fraction": 0.5552606582641602, "avg_line_length": 34.16666793823242, "blob_id": "1d9e36d22ffc8f511a8e946fc67357a21c2acd61", "content_id": "78f18378c719941d4b89dec869de0136190ba642", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15825, "license_type": "no_license", "max_line_length": 78, "num_lines": 450, "path": "/incoq/compiler/comp/compspec.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Internal representation and logic for comprehensions.\"\"\"\n\n\n__all__ = [\n 'CompSpec',\n \n 'for_rel_code',\n 'for_rels_union_code',\n 'for_rels_union_disjoint_code',\n 'make_comp_maint_code',\n]\n\n\nfrom simplestruct import Struct, TypedField\n\nfrom incoq.util.type import checktype\nfrom incoq.util.seq import pairs\nfrom incoq.util.collections import OrderedSet, SetDict\nfrom incoq.util.unify import apply_subst\nimport incoq.compiler.incast as L\n\nfrom .clause import Clause, EnumClause\nfrom .join import Join\n\n\nclass TupleTreeConstraintMaker(L.NodeVisitor):\n \n \"\"\"Return a set of constraint equations for an expression in a\n given domain, to the extent that this expression is a tuple tree\n of variable names. 
When non-tuple-non-variable components are\n found, do not emit constraints for that part.\n \"\"\"\n \n def __init__(self, rel, prefix):\n super().__init__()\n self.rel = rel\n \"\"\"Name of relation domain that the whole expression belongs to.\"\"\"\n self.prefix = prefix\n \n def process(self, tree):\n self.constrs = []\n \n self.path = []\n super().process(tree)\n assert len(self.path) == 0\n \n return self.constrs\n \n @property\n def current_domain(self):\n return '.'.join([self.rel] + [str(i) for i in self.path])\n \n def generic_visit(self, node):\n # Do not descend into other expression types.\n return\n \n def visit_Name(self, node):\n constr = (self.current_domain, self.prefix + node.id)\n self.constrs.append(constr)\n \n def visit_Tuple(self, node):\n # Produce the constraint that the current domain is a tuple\n # of correct arity.\n dom = self.current_domain\n subdoms = [dom + '.' + str(i) for i in range(1, len(node.elts) + 1)]\n constr = (dom, tuple(['<T>'] + subdoms))\n self.constrs.append(constr)\n \n # Produce constraints for each subterm.\n for i, elt in enumerate(node.elts, 1):\n self.path.append(i)\n self.visit(elt)\n self.path.pop()\n\n\nclass CompSpec(Struct):\n \n \"\"\"Internal format for comprehensions.\"\"\"\n \n join = TypedField(Join)\n resexp = TypedField(L.AST)\n params = TypedField(str, seq=True, unique=True)\n \n @classmethod\n def from_comp(cls, node, factory):\n \"\"\"Construct from Comp node. Requires a ClauseFactory.\"\"\"\n checktype(node, L.Comp)\n \n join = Join.from_comp(node, factory)\n \n return cls(join, node.resexp, node.params)\n \n def __new__(cls, join, resexp, params):\n if params is None:\n params = []\n return super().__new__(cls, join, resexp, params)\n \n def __init__(self, join, resexp, params):\n # Determine whether we can conservatively say the result\n # expression is duplicate safe. Used for rc-elimination\n # and for the special case of iterating over comps in\n # For loops.\n #\n # We know it's safe if the result expression is injective\n # and makes use of all local variables.\n localvars = set(self.join.enumvars) - set(self.params)\n resvars = set(L.VarsFinder.run(resexp, ignore_functions=True))\n self.is_duplicate_safe = (resvars.issuperset(localvars) and\n L.is_injective(resexp))\n \n def __str__(self):\n param_str = ((', '.join(self.params) + ' -> ')\n if len(self.params) > 0 else '')\n proj_str = L.ts(self.resexp)\n join_str = str(self.join)\n return '{}{{{} : {}}}'.format(param_str, proj_str, join_str)\n \n def to_comp(self, options):\n clauses = tuple(cl.to_AST() for cl in self.join.clauses)\n return L.Comp(resexp=self.resexp,\n clauses=clauses,\n params=self.params,\n options=options)\n \n def to_pattern(self):\n \"\"\"Produce a semantically equivalent CompSpec whose join\n utilizes pattern matching.\n \"\"\"\n # Unify variables.\n new_join, subst = self.join.elim_equalities(self.params)\n new_resexp = L.Templater.run(self.resexp, subst)\n resvars = L.VarsFinder.run(new_resexp, ignore_functions=True)\n \n # Make wildcards.\n keepvars = set(self.params) | set(resvars)\n new_join = new_join.make_wildcards(keepvars)\n \n return self._replace(join=new_join, resexp=new_resexp)\n \n def to_nonpattern(self):\n \"\"\"Opposite of to_pattern(). 
Produce a semantically equivalent\n CompSpec whose join does not rely on pattern matching.\n \"\"\"\n # Split variables.\n new_join = self.join.make_equalities(self.params)\n \n # Eliminate wildcards.\n new_join = new_join.elim_wildcards()\n \n return self._replace(join=new_join)\n \n def without_params(self, flat=False):\n \"\"\"Produce a CompSpec where the result expression is rewritten\n as a tuple of the parameters and the old result expression, and\n where the parameters are turned into locals.\n \n If flat is True, the old result expression must be a tuple,\n and the new one is formed by concatenating a tuple of the\n parameters with the old result expression.\n \"\"\"\n if flat:\n assert isinstance(self.resexp, L.Tuple)\n elts = self.params + self.resexp.elts\n else:\n elts = self.params + (self.resexp,)\n new_resexp = L.tuplify(elts)\n return self._replace(resexp=new_resexp, params=())\n \n def with_uset(self, uset_name, uset_params, *,\n force=False):\n \"\"\"Produce a CompSpec with an additional U-set constraint.\"\"\"\n if len(uset_params) == 0 and not force:\n return self\n \n uset_clause = EnumClause(uset_params, uset_name)\n new_clauses = (uset_clause,) + self.join.clauses\n new_join = self.join._replace(clauses=new_clauses)\n return self._replace(join=new_join)\n \n def get_uncon_params(self):\n \"\"\"Return a tuple of the unconstrained parameters. The U-set\n must at minimum contain these parameters.\n \n To find them, we traverse the clauses from left to right and\n add unconstrained parameters to the result set as they appear.\n This means that the clauses must be runnable in a left-to-right\n order; otherwise it is an error.\n \n If the query has cyclic constraints, there may be multiple\n possible minimal sets of parameters. The one corresponding to\n this left-to-right traversal is chosen.\n \"\"\"\n result = ()\n supported = set()\n for cl in self.join.clauses:\n # Vars that have an occurrence in this clause that is\n # not constrained by the clause.\n if cl.kind is Clause.KIND_ENUM:\n uncon_occ = OrderedSet(\n v for v, bindocc in zip(cl.enumlhs, cl.con_mask)\n if not bindocc if v != '_')\n else:\n uncon_occ = OrderedSet(cl.vars)\n \n # Add each new unconstrained var to the result.\n # They must be parameters.\n new_uncons = uncon_occ - supported\n for v in new_uncons:\n if v in self.params:\n if v not in result:\n result += (v,)\n else:\n raise AssertionError('Unconstrained var {} is not a '\n 'parameter'.format(v))\n \n # Any enumvar of this clause is now supported/constrained.\n supported.update(cl.enumvars)\n \n return result\n \n def get_domain_constraints(self, resultname):\n \"\"\"Return a sequence of equations representing domain\n constraints induced by this comprehension. The equations\n follow the form of util.unify. Enumeration variables are\n uniquely prefixed with resultname. 
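(For instance, with resultname 'Q', an enumeration\n        variable x appears as 'Q_x' in the equations; illustrative.) 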
resultname is also used\n to name the result relation.\n \n This method should be used after pattern rewriting has already\n been performed, so equality constraints are represented by\n common pattern variables.\n \"\"\"\n constrs = []\n prefix = resultname + '_'\n \n # Add constraints for each clause.\n for cl in self.join.clauses:\n constrs.extend(cl.get_domain_constrs(prefix))\n \n # Add constraints for result expression.\n resconstrs = TupleTreeConstraintMaker.run(\n self.resexp, resultname, prefix)\n constrs.extend(resconstrs)\n \n return constrs\n \n def get_membership_constraints(self):\n \"\"\"Return a mapping from enumeration variables to sets of\n dompaths they are constrained by.\n \"\"\"\n edges = set()\n for cl in self.join.clauses:\n edges.update(cl.get_membership_constrs())\n \n edges_buu = SetDict()\n for x, y, i in edges:\n edges_buu[x].add((y, i))\n \n # Recursive DFS to find all paths through edges.\n # Assumes edges have no cycles.\n def find(x):\n results = []\n for y, i in edges_buu[x]:\n if y in self.join.rels:\n results.append((y, [i]))\n else:\n for end, path in find(y):\n results.append((end, path + [i]))\n return results\n \n mapping = {}\n for x in self.join.enumvars:\n paths = find(x)\n mapping[x] = {'.'.join([y] + [str(i) for i in p if i is not None])\n for y, p in paths}\n \n return mapping\n\n\ndef for_rel_code(vars, iter, body):\n \"\"\"Generate code to run body once for each element in the valuation\n of iter. vars are bound to the components of each element. iter\n should evaluate to a relation or arity len(vars).\n \"\"\"\n return L.pc('''\n for VARS in ITER:\n BODY\n ''', subst={'VARS': L.tuplify(vars, lval=True),\n 'ITER': iter,\n '<c>BODY': body})\n\ndef for_rels_union_code(vars, iters, body, tempname, *,\n verify_disjoint=False):\n \"\"\"Generate code to run body once for each element in the union\n of the evaluations of iters. A temporary set is used to eliminate\n duplicates from the union.\n \"\"\"\n assert len(iters) > 0\n if len(iters) == 1:\n return for_rel_code(vars, iters[0], body)\n \n code = L.pc('''\n TEMPSET = set()\n ''', subst={'TEMPSET': tempname})\n \n for iter in iters:\n if verify_disjoint:\n template = L.trim('''\n for S_VARS in ITER:\n assert VARS not in TEMPSET\n TEMPSET.add(VARS)\n ''')\n else:\n template = L.trim('''\n for S_VARS in ITER:\n TEMPSET.nsadd(VARS)\n ''')\n code += L.pc(template,\n subst={'S_VARS': L.tuplify(vars, lval=True),\n 'ITER': iter,\n 'TEMPSET': tempname,\n 'VARS': L.tuplify(vars)})\n \n code += L.pc('''\n for VARS in TEMPSET:\n BODY\n del D_TEMPSET\n ''', subst={'VARS': L.tuplify(vars, lval=True),\n 'TEMPSET': tempname,\n '<c>BODY': body,\n 'D_TEMPSET': L.dn(tempname)})\n \n return code\n\ndef for_rels_union_disjoint_code(vars, iters, body):\n \"\"\"Generate code to run body once for each element in the union\n of the evaluations of iters. The union must be disjoint.\n \"\"\"\n assert len(iters) > 0\n if len(iters) == 1:\n return for_rel_code(vars, iters[0], body)\n \n code = ()\n for iter in iters:\n code += L.pc('''\n for VARS in ITER:\n BODY\n ''', subst={'VARS': L.tuplify(vars, lval=True),\n 'ITER': iter,\n '<c>BODY': body})\n \n return code\n\n\ndef make_comp_maint_code(spec, resrel, deltarel, op, elem, prefix, *,\n maint_impl, rc, selfjoin):\n \"\"\"Construct comprehension maintenance code. 
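(At a high level, one maintenance join is formed per\n    occurrence of deltarel in the query, and the joins are unioned and\n    their results applied to resrel.) 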
Return the code and\n a list of maintenance comprehensions used.\n \n spec:\n CompSpec of the comprehension to be computed incrementally.\n \n resrel:\n Name of the relation holding the saved result.\n \n deltarel:\n Name of the updated relation that triggered maintenance.\n \n op:\n Update operation ('add' or 'remove').\n \n elem:\n AST of the element added or removed to deltarel.\n \n prefix:\n The prefix to use for making fresh local variables.\n \n maint_impl:\n Value to use for the 'impl' option of emitted\n maintenance comprehensions ('batch' or 'auxonly').\n \n rc:\n Whether or not the incrementally computed comprehension\n uses reference counts ('yes', 'no', 'safe').\n \n selfjoin:\n Strategy for computing self-joins. Possible values:\n 'sub':\n use subtractive clauses\n (Code must be placed after addition / before removal.)\n 'aug':\n use augmented clauses\n (Code must be placed before addition / after removal.)\n 'das':\n use a differential assignment set\n 'assume_disjoint':\n naive, only valid if joins are disjoint\n 'assume_disjoint_verify':\n naive, use das to assert disjoint at runtime\n \"\"\"\n assert op in ['add', 'remove']\n assert maint_impl in ['batch', 'auxonly']\n assert rc in ['yes', 'no', 'safe']\n assert selfjoin in ['sub', 'aug', 'das', 'assume_disjoint',\n 'assume_disjoint_verify']\n \n assert deltarel in spec.join.rels\n \n if len(spec.params) > 0:\n raise ValueError('Cannot incrementalize comprehension with '\n 'parameters')\n \n # Get the maintenance comprehensions.\n disjoint_strat = (selfjoin if selfjoin in ['sub', 'aug']\n else 'das')\n maint_joins = spec.join.get_maint_joins(elem, deltarel, op, prefix,\n disjoint_strat=disjoint_strat)\n maint_comps = [j.to_comp({'impl': maint_impl})\n for j in maint_joins]\n # Get the maintenance joins' enumvars.\n assert all(j1.enumvars == j2.enumvars\n for j1, j2 in pairs(maint_joins))\n maint_projvars = maint_joins[0].enumvars\n \n # Decide whether the body is a normal update or\n # a reference-counted one.\n use_rc = {'yes': True,\n 'no': False,\n 'safe': not spec.is_duplicate_safe}[rc]\n if use_rc:\n op = 'rc' + op\n resvars = L.VarsFinder.run(spec.resexp, ignore_functions=True)\n resexp = L.prefix_names(spec.resexp, resvars, prefix)\n body = L.pc('''\n RES.OP(RESEXP)\n ''', subst={'RES': resrel,\n '@OP': op,\n 'RESEXP': resexp})\n \n # Create code according to the choice of self-join strategy.\n if selfjoin in ['sub', 'aug', 'assume_disjoint']:\n code = for_rels_union_disjoint_code(\n maint_projvars, maint_comps, body)\n else:\n dasprefix = prefix + 'DAS'\n ver_dis = selfjoin == 'assume_disjoint_verify'\n code = for_rels_union_code(\n maint_projvars, maint_comps, body,\n dasprefix, verify_disjoint=ver_dis)\n \n return code, maint_comps\n" }, { "alpha_fraction": 0.774193525314331, "alphanum_fraction": 0.774193525314331, "avg_line_length": 31, "blob_id": "3208df69393acfa3a033ebdd3fa5e87a8709189c", "content_id": "62c8adb95157a043322fab96f643136e4cd756f4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 31, "license_type": "no_license", "max_line_length": 31, "num_lines": 1, "path": "/experiments/rbac/corerbac/__init__.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from .run_corerbac_exp import *" }, { "alpha_fraction": 0.6241790056228638, "alphanum_fraction": 0.625, "avg_line_length": 27.828401565551758, "blob_id": "866c237cd0b649cde273161217ffcbb5e886eb03", "content_id": "592a9c70aac71687bf8f261ef65dfa2a30dc374a", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4872, "license_type": "no_license", "max_line_length": 69, "num_lines": 169, "path": "/incoq/compiler/incast/structconv.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Conversion between Struct and native formats of PyASTs,\nand export of iast features, as well as parsing/unparsing.\n\"\"\"\n\n\n__all__ = [\n 'import_structast',\n 'export_structast',\n \n 'parse_structast',\n 'unparse_structast',\n \n # Re-exported from iast directly.\n 'trim',\n 'dump',\n 'NodeVisitor',\n 'AdvNodeVisitor',\n 'NodeTransformer',\n 'AdvNodeTransformer',\n 'Templater',\n 'PatternTransformer',\n 'MacroProcessor',\n 'ContextSetter',\n 'astargs',\n 'literal_eval',\n 'raw_match',\n 'match',\n 'MatchFailure',\n]\n\n\nimport ast\nfrom types import SimpleNamespace\n\nimport iast\nfrom iast import (trim, dump, NodeVisitor, NodeTransformer,\n AdvNodeVisitor, AdvNodeTransformer,\n raw_match, match, MatchFailure, PatternTransformer)\nfrom iast.python.python34 import (make_pattern, extract_tree,\n parse as _parse)\n\nfrom . import nodes\nfrom .nodes import native_nodes, incast_nodes, TypeAdder\nfrom . import unparse\n\n\n# Versions of the iast.python utilities that use our typed\n# nodes instead of the original untyped ones.\npyutil_all = iast.python.pyutil.get_all(nodes)\nTemplater = pyutil_all['Templater']\nMacroProcessor = pyutil_all['MacroProcessor']\nContextSetter = pyutil_all['ContextSetter']\nastargs = pyutil_all['astargs']\nliteral_eval = pyutil_all['literal_eval']\n\n\n# TODO: simplify StructImporter and make it not rely on old iast.\n\nclass StructImporter:\n \n \"\"\"Convert from native AST to Struct AST. Similar in purpose\n to iast.python.native.pyToStruct.\n \"\"\"\n \n # We're using custom visitor logic here. We can't use\n # iast.NodeTransformer because we're not a Struct AST yet.\n # We can't use ast.NodeTransformer because we need to handle\n # being called on sequences. We can't use iast.python.native.\n # pyToStruct because we need to handle the Comment node type.\n #\n # The custom logic simply maps a function across the tree,\n # with no need to worry about the splicing logic of\n # NodeTransformer (since the function is 1:1).\n \n def visit(self, tree):\n if isinstance(tree, ast.AST):\n return self.node_visit(tree)\n elif isinstance(tree, list):\n return self.seq_visit(tree)\n else:\n return tree\n \n def node_visit(self, node):\n new_fields = []\n for field in node._fields:\n value = getattr(node, field)\n new_value = self.visit(value)\n new_fields.append(new_value)\n \n new_nodetype = incast_nodes[node.__class__.__name__]\n \n # For expressions, add a type information field.\n if issubclass(new_nodetype, incast_nodes['expr']):\n new_fields.append(None)\n \n return new_nodetype(*new_fields)\n \n def seq_visit(self, seq):\n new_seq = []\n for item in seq:\n new_value = self.visit(item)\n new_seq.append(new_value)\n return tuple(new_seq)\n\n\nclass StructExporter(NodeTransformer):\n \n \"\"\"Convert from Struct AST to native AST. 
Similar in purpose\n to iast.python.native.structToPy.\"\"\"\n \n # We don't use iast.python.native.structToPy because we need\n # to handle the Comment node.\n \n def seq_visit(self, seq):\n new_seq = super().seq_visit(seq)\n return list(new_seq)\n \n def generic_visit(self, node):\n repls = {}\n for field in node._fields:\n value = getattr(node, field)\n result = self.visit(value)\n repls[field] = result\n \n new_nodetype = native_nodes[node.__class__.__name__]\n return new_nodetype(**repls)\n\ndef import_structast(tree):\n \"\"\"Convert from native AST to Struct AST.\"\"\"\n return StructImporter().visit(tree)\n\ndef export_structast(tree):\n \"\"\"Convert from Struct AST to native AST.\"\"\"\n return StructExporter.run(tree)\n\n\ndef parse_structast(source, *, mode=None, subst=None, patterns=False,\n types=True):\n \"\"\"Version of iast.python.native.parse() that also runs\n extract_tree(), subst(), and can be used on patterns.\n Type information for expressions is set to None.\n \"\"\"\n tree = _parse(source)\n tree = TypeAdder.run(tree)\n tree = extract_tree(tree, mode)\n if patterns:\n tree = make_pattern(tree)\n if subst is not None:\n tree = Templater.run(tree, subst)\n return tree\n\n\nclass Unparser(unparse.Unparser):\n \n # Add Comment printing to standard unparser.\n \n ast = SimpleNamespace(**incast_nodes)\n \n def _Comment(self, node):\n if node.text:\n lines = node.text.split('\\n')\n else:\n lines = []\n for line in lines:\n self.fill('# ' + line)\n\ndef unparse_structast(tree):\n \"\"\"Unparse from Struct AST to source code.\"\"\"\n return Unparser.to_source(tree)\n" }, { "alpha_fraction": 0.5286116003990173, "alphanum_fraction": 0.5296707153320312, "avg_line_length": 34.79976272583008, "blob_id": "7c1c95d8bd736d93fccb9a672e155c2552633541", "content_id": "ee66f0466e34fee303a22cf0ed1eb4957470933b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 30215, "license_type": "no_license", "max_line_length": 80, "num_lines": 844, "path": "/incoq/compiler/aggr/aggr.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Aggregate queries.\n\nAn aggregate's operand is either a SetMatch or a SetMatch wrapped in\na DemQuery node. A Name node is also allowed and treated as a SetMatch\nwith a null tuple as the key. If the operand does not use demand, the\naggregate may or may not use demand. If the operand does use demand,\nthe aggregate must as well. The parameters to the aggregate are always\nexactly the parameters to the operand, and if the aggregate uses demand,\nall of its parameters are demand-controlled.\n\nFor example when using demand for the aggregate, the queries\n\n sum(setmatch(R, mask, p))\n sum(R)\n sum(DEMQUERY(R, [p1], setmatch(R, \"bbu\", (p1, p2))))\n\nare replaced by demand-driven map lookups\n\n DEMQUERY(A, p, A[p])\n DEMQUERY(A, (), A[()])\n DEMQUERY(A, [p1, p2], A[p1, p2])\n\nIf the aggregate does not use demand, then the third case is disallowed\nand the first two become\n\n A[p] if p in A else 0\n A[()] if () in A else 0\n\nFor a non-demand-driven aggregate, the invariant is that the map has a\nkey for every combination of parameter values for which the SetMatch\nresult is non-empty. For a demand-driven one, the invariant is that\nthe map keys are exactly the same as the aggregate's demand set.\n\nThere is also a third option, \"half-demand\", for when the aggregate is\ndemand-driven but not the operand. 
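This applies, for instance, to the\nfirst query above, sum(setmatch(R, mask, p)), when the aggregate uses\ndemand but R does not. 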
In this case, the invariant is that\nthere is a key in the aggregate result map whenever that key is in the\nU-set or it is a key of the operand's setmatch. This has the benefit\nof having constant-time cost for demanding new values, since the only\ntime an answer isn't already in the map is when the corresponding set\nis empty anyway.\n\"\"\"\n\n\n__all__ = [\n 'AGGR_PREFIX',\n 'is_aggrrel',\n \n 'AggrSpec',\n 'IncAggr',\n 'inc_aggr',\n 'aggr_needs_batch',\n 'aggr_needs_dem',\n 'aggr_canuse_halfdemand',\n]\n\n\nfrom abc import ABCMeta, abstractmethod\n\nfrom simplestruct import Struct, Field, TypedField\nfrom simplestruct.type import checktype\n\nimport incoq.compiler.incast as L\nfrom incoq.compiler.set import Mask\n\n\nAGGR_PREFIX = 'Aggr'\n\ndef is_aggrrel(rel):\n return rel.startswith(AGGR_PREFIX)\n\n\nclass AggrSpec(Struct):\n \n \"\"\"Aggregate query specification.\"\"\"\n \n aggrop = TypedField(str)\n \"\"\"Aggregate operation.\"\"\"\n rel = TypedField(str)\n \"\"\"Operand relation.\"\"\"\n relmask = TypedField(Mask)\n \"\"\"Operand setmatch mask (Mask.U if operand is just a variable).\"\"\"\n params = TypedField(str, seq=True)\n \"\"\"Parameters.\"\"\"\n oper_demname = Field()\n \"\"\"Operand demand name, or None if operand does not use demand.\"\"\"\n oper_demparams = Field()\n \"\"\"Operand demand parameters, or None if operand does not use demand.\"\"\"\n \n @property\n def has_oper_demand(self):\n return self.oper_demname is not None\n \n @classmethod\n def from_node(cls, node):\n checktype(node, L.Aggregate)\n \n if isinstance(node.value, L.DemQuery):\n assert all(isinstance(a, L.Name) for a in node.value.args)\n oper_demparams = tuple(a.id for a in node.value.args)\n oper_demname = node.value.demname\n oper = node.value.value\n else:\n oper_demparams = None\n oper_demname = None\n oper = node.value\n \n if isinstance(oper, L.Name):\n rel = oper.id\n relmask = Mask.U\n params = ()\n \n elif (isinstance(oper, L.SetMatch) and\n isinstance(oper.target, L.Name) and\n L.is_vartuple(oper.key)):\n rel = oper.target.id\n relmask = Mask(oper.mask)\n params = L.get_vartuple(oper.key)\n \n else:\n raise L.ProgramError('Bad aggregate operand', node=node)\n \n return cls(node.op, rel, relmask, params,\n oper_demname, oper_demparams)\n \n def __init__(self, aggrop, rel, relmask, params,\n oper_demname, oper_demparams):\n assert self.aggrop in ['count', 'sum', 'min', 'max']\n \n # AST node representation.\n node = L.ln(rel)\n if len(params) > 0:\n node = L.SetMatch(node, relmask.make_node().s,\n L.tuplify(params))\n if oper_demname is not None:\n node = L.DemQuery(oper_demname,\n [L.ln(p) for p in oper_demparams], node)\n node = L.Aggregate(node, aggrop, None)\n self.node = node\n \n def __str__(self):\n return L.ts(self.node)\n \n def get_domain_constraints(self, resultname):\n \"\"\"As in CompSpec.get_domain_constraints(). Each component of\n our result relation besides the last one is equated to a\n position in the underlying operand relation.\n \"\"\"\n # Get positions of parameters in underlying operand setmatch.\n positions = [i for i, p in enumerate(self.relmask.parts, 1)\n if p == 'b']\n assert len(positions) == len(self.params)\n \n constrs = [(resultname + '.' + str(i),\n self.rel + '.' 
+ str(j))\n for i, j in enumerate(positions, 1)]\n \n return constrs\n\n\nclass IncAggr(Struct):\n \n \"\"\"Info for incrementalizing an aggregate query.\"\"\"\n \n aggr = TypedField(L.Aggregate)\n \"\"\"Aggregate node.\"\"\"\n spec = TypedField(AggrSpec)\n \"\"\"Aggregate query info.\"\"\"\n name = TypedField(str)\n \"\"\"Result set name.\"\"\"\n demname = Field()\n \"\"\"Aggregate demand name, or None if not using demand.\"\"\"\n uset_lru = Field()\n \"\"\"None or an integer bound for LRU cache size.\"\"\"\n half_demand = TypedField(bool)\n \"\"\"If using demand and this is True, use the \"half-demand\"\n strategy.\n \"\"\"\n \n @property\n def has_demand(self):\n return self.demname is not None\n \n @property\n def tracks_counts(self):\n # Counts are needed to know when to remove entries from the\n # aggregate result map. If we're using the normal demand\n # strategy, we only remove entries when keys become undemanded,\n # so counts aren't needed.\n return not (self.has_demand and not self.half_demand)\n \n def __init__(self, aggr, spec, name, demname, uset_lru, half_demand):\n self.params = params = tuple(spec.params)\n \"\"\"Aggregate parameters (same as operand parameters).\n Also same as aggregate demand parameters.\n \"\"\"\n \n self.aggrmask = Mask.from_keylen(len(params))\n \"\"\"Aggregate result retrieval mask.\"\"\"\n \n self.oper_deltamask = spec.relmask.make_delta_mask()\n \"\"\"Mask for doing delta test upon change to aggregate operand.\"\"\"\n \n assert not (spec.has_oper_demand and not self.has_demand), \\\n 'Can\\'t have non-demand-driven aggregate over demand-driven ' \\\n 'operand'\n \n assert not (half_demand and not self.has_demand), \\\n 'Can\\'t use half-demand strategy when not using demand at all'\n\n\nclass AggrCodegen(metaclass=ABCMeta):\n \n \"\"\"Abstract base class for aggregate code generation.\"\"\"\n \n # We maintain in the aggregate result a map from parameters to\n # result state. The case where there are no parameters is\n # handled with the same logic, using the unit tuple as the key.\n #\n # The maintenance code templates are parameterized by code for\n # initializing and updating the result state. The code is provided\n # in subclasses.\n #\n # When both the aggregate and operand use demand, the order of\n # events for adding to U is to first bring the aggregate invariant\n # up-to-date with the current contents of the operand (in case the\n # operand is already demanded), and then to propagate demand to\n # the operand. For removing from U it's the opposite order.\n #\n # Terminology: A \"state\" is the information used to incrementally\n # track a single aggregate result for a single set. For simple\n # aggregates this is just the result itself. A \"mapval\" is\n # an entry in the range of an aggregate result map, which consists\n # of the state possibly paired with a count of how many entries are\n # in the underlying operand set. The count is used to cleanup the\n # map entry in the case that the aggregate is not demand-driven.\n \n def __init__(self, incaggr):\n self.incaggr = incaggr\n \n def make_addu_maint(self, prefix):\n \"\"\"Generate code for after an addition to U.\"\"\"\n incaggr = self.incaggr\n assert incaggr.has_demand\n spec = incaggr.spec\n mv_var = prefix + 'val'\n elemvar = prefix + 'elem'\n \n # If we're using half-demand, there's no demand to propagate\n # to the operand. 
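(Under half-demand the operand is never\n        # demand-driven, so there is nothing to demand; illustrative note.) 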
All we need to do is add an entry with count\n # 0 if one is not already there.\n if incaggr.half_demand:\n return L.pc('''\n S_MV = A.smdeflookup(MASK, KEY, None)\n if MV is None:\n A.smassignkey(MASK, KEY, ZERO, PREFIX)\n ''', subst={'A': L.ln(incaggr.name),\n 'S_MV': L.sn(mv_var),\n 'MV': L.ln(mv_var),\n 'MASK': incaggr.aggrmask.make_node(),\n 'KEY': L.tuplify(incaggr.params),\n 'ZERO': self.make_zero_mapval_expr(),\n 'PREFIX': L.Str(prefix)})\n \n update_code = self.make_update_state_code(\n L.sn(mv_var), L.ln(mv_var), 'add',\n L.ln(elemvar), prefix)\n \n # Make operand demand function call, if operand uses demand.\n if spec.has_oper_demand:\n demfunc = L.N.demfunc(spec.oper_demname)\n call_demfunc = L.Call(L.ln(demfunc),\n tuple(L.ln(v) for v in spec.oper_demparams),\n (), None, None)\n propagate_code = (L.Expr(call_demfunc),)\n else:\n propagate_code = () \n \n code = L.pc('''\n S_MV = ZERO\n for S_ELEM in setmatch(R, RELMASK, PARAMS):\n UPDATE_MAPVAL\n A.smassignkey(AGGRMASK, KEY, MV, PREFIX)\n PROPAGATE_DEMAND\n ''', subst={'S_MV': L.sn(mv_var),\n 'ZERO': self.make_zero_mapval_expr(),\n 'S_ELEM': L.sn(elemvar),\n 'R': spec.rel,\n 'RELMASK': spec.relmask.make_node(),\n 'PARAMS': L.tuplify(spec.params),\n '<c>UPDATE_MAPVAL': update_code,\n 'A': L.ln(incaggr.name),\n 'AGGRMASK': incaggr.aggrmask.make_node(),\n 'KEY': L.tuplify(incaggr.params),\n 'MV': L.ln(mv_var),\n 'PREFIX': L.Str(prefix),\n '<c>PROPAGATE_DEMAND': propagate_code})\n \n return code\n \n def make_removeu_maint(self, prefix):\n \"\"\"Generate code for before a removal from U.\"\"\"\n incaggr = self.incaggr\n assert incaggr.has_demand\n spec = incaggr.spec\n mv_var = prefix + 'val'\n \n # If we're using half-demand, there's no demand to propagate\n # to the operand. All we need to do is determine whether to\n # do the removal by checking whether the count is 0.\n if incaggr.half_demand:\n return L.pc('''\n S_MV = A.smlookup(MASK, KEY)\n if COUNT == 0:\n A.smdelkey(MASK, KEY, PREFIX)\n ''', subst={'S_MV': L.sn(mv_var),\n 'COUNT': self.mapval_proj_count(L.ln(mv_var)),\n 'A': incaggr.name,\n 'MASK': incaggr.aggrmask.make_node(),\n 'KEY': L.tuplify(incaggr.params),\n 'PREFIX': L.Str(prefix)})\n \n # Generate operand undemand function call, if operand\n # uses demand.\n if spec.has_oper_demand:\n undemfunc = L.N.undemfunc(spec.oper_demname)\n call_undemfunc = L.Call(L.ln(undemfunc),\n tuple(L.ln(v) for v in spec.oper_demparams),\n (), None, None)\n propagate_code = (L.Expr(call_undemfunc),)\n else:\n propagate_code = () \n \n code = L.pc('''\n PROPAGATE_DEMAND\n A.smdelkey(MASK, KEY, PREFIX)\n ''', subst={'A': incaggr.name,\n 'MASK': incaggr.aggrmask.make_node(),\n 'KEY': L.tuplify(incaggr.params),\n 'PREFIX': L.Str(prefix),\n '<c>PROPAGATE_DEMAND': propagate_code})\n \n return code\n \n def make_oper_maint(self, prefix, op, elem):\n \"\"\"Generate code for an addition or removal update to the operand.\"\"\"\n incaggr = self.incaggr\n spec = incaggr.spec\n relmask = spec.relmask\n mv_var = prefix + 'val'\n uset = L.N.uset(incaggr.name)\n vars = tuple(prefix + 'v' + str(i)\n for i in range(1, len(relmask) + 1))\n bvars, uvars, _ = relmask.split_vars(vars)\n \n nextmapval_code = self.make_update_mapval_code(\n L.sn(mv_var), L.ln(mv_var),\n op, L.tuplify(uvars), prefix)\n \n subst = {'S_VARS': L.tuplify(vars, lval=True),\n 'ELEM': elem,\n 'KEY': L.tuplify(bvars),\n 'ZERO': self.make_zero_mapval_expr(),\n 'U': L.ln(uset),\n 'S_MV': L.sn(mv_var),\n 'A': incaggr.name,\n 'MASK': incaggr.aggrmask.make_node(),\n 'KEY': 
L.tuplify(bvars),\n '<c>NEXT_MAPVAL': nextmapval_code,\n 'MV': L.ln(mv_var),\n 'PREFIX': L.Str(prefix)}\n \n # We break into different cases based on whether we're using\n # demand or not, because the invariants are different in terms\n # of what keys are in the map.\n \n if incaggr.has_demand and not incaggr.half_demand:\n # If the U-set check passes, the key is definitely in\n # the map, so use strict lookups and updates.\n code = L.pc('''\n S_VARS = ELEM\n if KEY in U:\n S_MV = A.smlookup(MASK, KEY)\n NEXT_MAPVAL\n A.smreassignkey(MASK, KEY, MV, PREFIX)\n ''', subst=subst)\n \n else:\n # The keys in the map should exist iff the corresponding\n # operand set is non-empty. For addition, use non-strict\n # lookups and updates, since we don't know whether it was\n # empty before. For removal, use strict lookup since we\n # know it's non-empty, but check the count to tell whether\n # to delete it or strictly reassign it. When using half-\n # demand, only delete it if it's not demanded.\n \n subst['COUNT'] = self.mapval_proj_count(L.ln(mv_var))\n if incaggr.half_demand:\n # Check for a count of 1, not 0, because it's the value\n # of count before the update.\n delete_cond = L.pe('COUNT == 1 and KEY not in U', subst=subst)\n \n else:\n delete_cond = L.pe('COUNT == 1', subst=subst)\n subst['DELETE_COND'] = delete_cond\n \n if op == 'add':\n code = L.pc('''\n S_VARS = ELEM\n S_MV = A.smdeflookup(MASK, KEY, ZERO)\n NEXT_MAPVAL\n A.smnsassignkey(MASK, KEY, MV, PREFIX)\n ''', subst=subst)\n elif op == 'remove':\n code = L.pc('''\n S_VARS = ELEM\n S_MV = A.smlookup(MASK, KEY)\n if DELETE_COND:\n A.smdelkey(MASK, KEY, PREFIX)\n else:\n NEXT_MAPVAL\n A.smreassignkey(MASK, KEY, MV, PREFIX)\n ''', subst=subst)\n else:\n assert()\n \n # Guard code in a delta check if necessary.\n if relmask.has_wildcards or relmask.has_equalities:\n code = L.pc('''\n if deltamatch(R, MASK, ELEM, 1):\n CODE\n ''', subst={'R': spec.rel,\n 'MASK': incaggr.oper_deltamask,\n 'ELEM': elem,\n '<c>CODE': code})\n \n return code\n \n def make_retrieval_code(self):\n \"\"\"Make code for retrieving the value of the aggregate result,\n including demanding it.\n \"\"\"\n incaggr = self.incaggr\n \n params_l = L.List(tuple(L.ln(p) for p in incaggr.params), L.Load())\n \n if incaggr.has_demand:\n code = L.pe('''\n DEMQUERY(NAME, PARAMS_L, RES.smlookup(AGGRMASK, PARAMS_T))\n ''', subst={'NAME': incaggr.name,\n 'PARAMS_L': params_l,\n 'PARAMS_T': L.tuplify(incaggr.params),\n 'RES': incaggr.name,\n 'AGGRMASK': incaggr.aggrmask.make_node()})\n \n else:\n code = L.pe('''\n RES.smdeflookup(AGGRMASK, PARAMS_T, ZERO)\n ''', subst={'RES': incaggr.name,\n 'AGGRMASK': incaggr.aggrmask.make_node(),\n 'PARAMS_T': L.tuplify(incaggr.params),\n 'ZERO': self.make_zero_mapval_expr(),})\n \n code = self.make_proj_mapval_code(code)\n \n return code\n \n def mapval_proj_count(self, mapval_node):\n \"\"\"Given a node for a mapval, return the count component.\n Requires that we're tracking counts.\n \"\"\"\n assert self.incaggr.tracks_counts\n return L.pe('MAPVAL[1]', subst={'MAPVAL': mapval_node})\n \n def make_zero_mapval_expr(self):\n \"\"\"Produce an expression for a map value corresponding to the\n empty set.\n \"\"\"\n zero_expr = self.make_zero_state_expr()\n # If we don't track counts, return the zero state itself.\n if self.incaggr.tracks_counts:\n return L.pe('(ZERO, 0)', subst={'ZERO': zero_expr})\n else:\n return zero_expr\n \n def make_update_mapval_code(self, mv_snode, mv_lnode,\n op, val_node, prefix):\n \"\"\"Produce code to make a new 
mapval, given an update to\n the corresponding operand. The mapval is read from mv_lnode\n and written to mv_snode.\n \"\"\"\n # If we don't track counts, the mapvals are the same as\n # the states.\n if not self.incaggr.tracks_counts:\n return self.make_update_state_code(mv_snode, mv_lnode,\n op, val_node, prefix)\n \n statevar = prefix + 'state'\n state_lnode = L.ln(statevar)\n state_snode = L.sn(statevar)\n countvar = prefix + 'count'\n \n updatestate_code = self.make_update_state_code(\n state_snode, state_lnode,\n op, val_node, prefix)\n \n if op == 'add':\n template = 'COUNTVAR + 1'\n elif op == 'remove':\n template = 'COUNTVAR - 1'\n else:\n assert()\n new_count_node = L.pe(template, subst={'COUNTVAR': L.ln(countvar)})\n \n return L.pc('''\n S_STATE, S_COUNTVAR = MV\n UPDATE_STATE\n S_MV = STATE, NEW_COUNT\n ''', subst={'S_STATE': state_snode,\n 'S_COUNTVAR': L.sn(countvar),\n 'MV': mv_lnode,\n '<c>UPDATE_STATE': updatestate_code,\n 'STATE': state_lnode,\n 'NEW_COUNT': new_count_node,\n 'S_MV': mv_snode})\n \n def make_proj_mapval_code(self, mapval_node):\n \"\"\"Given an expression for a mapval, return an expression for\n getting the result value.\n \"\"\"\n # Fall-through to state proj code if we don't track counts.\n if not self.incaggr.tracks_counts:\n return self.make_proj_state_code(mapval_node)\n \n state_node = L.pe('MAPVAL[0]', subst={'MAPVAL': mapval_node})\n return self.make_proj_state_code(state_node)\n \n @abstractmethod\n def make_zero_state_expr(self):\n \"\"\"Produce an expression that returns the state corresponding\n to the aggregate's result on an empty set.\n \"\"\"\n \n @abstractmethod\n def make_update_state_code(self, state_snode, state_lnode,\n op, val_node, prefix):\n \"\"\"Produce code to update a state for a given operation and\n value. 
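(For a sum aggregate this comes out roughly as\n        state = state + val on add and state = state - val on remove;\n        see CountSumCodegen below.) 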
The state is read from state_lnode and written to\n state_snode.\n \"\"\"\n \n @abstractmethod\n def make_proj_state_code(self, state_node):\n \"\"\"Given an expression for a state, return an expression\n for getting the result value.\n \"\"\"\n return state_node\n\n\nclass CountSumCodegen(AggrCodegen):\n \n \"\"\"Base class for count and sum aggregates, both of which\n use a simple number as their state.\n \"\"\"\n \n kind = None\n \n def make_zero_state_expr(self):\n return L.pe('0')\n \n def make_update_state_code(self, state_snode, state_lnode,\n op, val_node, prefix):\n opstr = {'add': '+', 'remove': '-'}[op]\n bystr = {'count': '1', 'sum': 'VAL'}[self.kind]\n template = L.trim('''\n S_STATE = STATE {OP} {BY}\n '''.format(OP=opstr, BY=bystr))\n return L.pc(template, subst={'S_STATE': state_snode,\n 'STATE': state_lnode,\n 'VAL': val_node})\n \n def make_proj_state_code(self, state_node):\n return state_node\n\nclass CountCodegen(CountSumCodegen):\n kind = 'count'\n\nclass SumCodegen(CountSumCodegen):\n kind = 'sum'\n\n\nclass MinMaxCodegen(AggrCodegen):\n \n \"\"\"Base class for min and max aggregates, both of which\n use a pair of a tree and a saved number as their state.\n \"\"\"\n \n kind = None\n \n def make_zero_state_expr(self):\n return L.pe('(Tree(), None)')\n \n def make_update_state_code(self, state_snode, state_lnode,\n op, val_node, prefix):\n add_template = L.trim('''\n S_TREE, _ = STATE\n TREE[VAL] = None\n S_STATE = (TREE, TREE.MINMAX())\n ''')\n \n remove_template = L.trim('''\n S_TREE, _ = STATE\n del TREE[VAL]\n S_STATE = (TREE, TREE.MINMAX())\n ''')\n \n template = {'add': add_template, 'remove': remove_template}[op]\n \n treevar = prefix + 'tree'\n minmax = {'min': '__min__', 'max': '__max__'}[self.kind] \n code = L.pc(template,\n subst={'S_TREE': L.sn(treevar),\n 'TREE': L.ln(treevar),\n '@MINMAX': minmax,\n 'STATE': state_lnode,\n 'S_STATE': state_snode,\n 'VAL': val_node})\n return code\n \n def make_proj_state_code(self, state_node):\n return L.pe('STATE[1]',\n subst={'STATE': state_node})\n\nclass MinCodegen(MinMaxCodegen):\n kind = 'min'\n\nclass MaxCodegen(MinMaxCodegen):\n kind = 'max'\n\n\ndef get_cg_class(aggrop):\n return {'count': CountCodegen,\n 'sum': SumCodegen,\n 'min': MinCodegen,\n 'max': MaxCodegen}[aggrop]\n\n\nclass AggrMaintainer(L.NodeTransformer):\n \n \"\"\"Maintain an aggregate invariant.\"\"\"\n \n def __init__(self, manager, incaggr):\n super().__init__()\n self.manager = manager\n self.incaggr = incaggr\n \n cgcls = get_cg_class(incaggr.spec.aggrop)\n self.cg = cgcls(incaggr)\n \n name = self.incaggr.name\n self.addfunc = '_maint_{}_add'.format(name)\n self.removefunc = '_maint_{}_remove'.format(name)\n \n def visit_Module(self, node):\n incaggr = self.incaggr\n self.manager.add_invariant(incaggr.name, incaggr)\n \n add_prefix = self.manager.namegen.next_prefix()\n remove_prefix = self.manager.namegen.next_prefix()\n addcode = self.cg.make_oper_maint(add_prefix, 'add', L.pe('_e'))\n removecode = self.cg.make_oper_maint(remove_prefix,\n 'remove', L.pe('_e'))\n \n code = L.pc('''\n RES = Set()\n def ADDFUNC(_e):\n ADDCODE\n def REMOVEFUNC(_e):\n REMOVECODE\n ''', subst={'RES': incaggr.name,\n '<def>ADDFUNC': self.addfunc,\n '<c>ADDCODE': addcode,\n '<def>REMOVEFUNC': self.removefunc,\n '<c>REMOVECODE': removecode})\n node = node._replace(body=code + node.body)\n \n node = self.generic_visit(node)\n \n return node\n \n def visit_SetUpdate(self, node):\n spec = self.incaggr.spec\n \n node = self.generic_visit(node)\n \n if not 
node.is_varupdate():\n return node\n var, op, elem = node.get_varupdate()\n \n if var == spec.rel:\n precode = postcode = ()\n if op == 'add':\n postcode = L.pc('ADDFUNC(ELEM)',\n subst={'ADDFUNC': self.addfunc,\n 'ELEM': elem})\n elif op == 'remove':\n precode = L.pc('REMOVEFUNC(ELEM)',\n subst={'REMOVEFUNC': self.removefunc,\n 'ELEM': elem})\n else:\n assert()\n \n code = L.Maintenance(self.incaggr.name, L.ts(node),\n precode, (node,), postcode)\n \n elif var == L.N.uset(self.incaggr.name):\n prefix = self.manager.namegen.next_prefix()\n precode = postcode = ()\n if op == 'add':\n postcode = self.cg.make_addu_maint(prefix)\n elif op == 'remove':\n precode = self.cg.make_removeu_maint(prefix)\n else:\n assert()\n \n code = L.Maintenance(self.incaggr.name, L.ts(node),\n precode, (node,), postcode)\n \n else:\n code = node\n \n return code\n\n\nclass AggrReplacer(L.NodeTransformer):\n \n \"\"\"Replace occurrences of an aggregate query.\"\"\"\n \n def __init__(self, manager, incaggr):\n super().__init__()\n self.manager = manager\n self.incaggr = incaggr\n \n cgcls = get_cg_class(incaggr.spec.aggrop)\n self.cg = cgcls(incaggr)\n \n def visit_Module(self, node):\n incaggr = self.incaggr\n \n # Emit demand function if we use demand.\n if incaggr.has_demand:\n maker = L.DemfuncMaker(incaggr.name, str(incaggr.spec),\n incaggr.params, incaggr.uset_lru)\n header_code = maker.make_alldem()\n node = node._replace(body=header_code + node.body)\n \n return self.generic_visit(node)\n \n def visit_Aggregate(self, node):\n node = self.generic_visit(node)\n \n if node != self.incaggr.aggr:\n return node\n \n return self.cg.make_retrieval_code()\n\n\nclass AggrFallbacker(L.NodeTransformer):\n \n \"\"\"Mark occurrences of a query with the option 'impl' set to\n 'batch'.\n \"\"\"\n \n def __init__(self, aggr):\n super().__init__()\n self.aggr = aggr\n \n def visit_Aggregate(self, node):\n node = self.generic_visit(node)\n \n if node == self.aggr:\n new_opts = dict(node.options)\n new_opts['impl'] = 'batch'\n node = node._replace(options=new_opts)\n \n return node\n\n\ndef inc_aggr(tree, manager, aggr, name,\n *, demand, half_demand):\n \"\"\"Incrementalize an aggregate query.\n \n If the aggregate is not of the right form, then, if the global\n options permit it, skip transforming this query. In this case,\n the query gets marked with 'impl' = 'batch' to prevent handling\n it again. 
If the options don't permit it, raise an exception.\n \"\"\"\n if manager.options.get_opt('verbose'):\n s = ('Incrementalizing ' + name + ': ').ljust(45)\n s += L.ts(aggr)\n print(s)\n \n spec = AggrSpec.from_node(aggr)\n \n uset_lru = manager.options.get_queryopt(aggr, 'uset_lru')\n if uset_lru is None:\n uset_lru = manager.options.get_opt('default_uset_lru')\n demname = name if demand else None\n if not demand:\n half_demand = False\n incaggr = IncAggr(aggr, spec, name, demname, uset_lru, half_demand)\n \n tree = AggrReplacer.run(tree, manager, incaggr)\n tree = AggrMaintainer.run(tree, manager, incaggr)\n \n if 'in_original' in aggr.options:\n manager.original_queryinvs.add(name)\n \n return tree\n\n\ndef aggr_needs_batch(aggr):\n \"\"\"Given an Aggregate node, return True if it must be implemented\n as a batch computation because its form can't be handled\n incrementally.\n \"\"\"\n try:\n AggrSpec.from_node(aggr)\n except L.ProgramError:\n return True\n else:\n return False\n\ndef aggr_needs_dem(aggr):\n \"\"\"Given an Aggregate node, return True if it must use demand in\n order to be incrementalized.\n \"\"\"\n return isinstance(aggr.value, L.DemQuery)\n\ndef aggr_canuse_halfdemand(aggr):\n \"\"\"Given an Aggregate node, return True if it may use the half-\n demand strategy.\n \"\"\"\n # Can't be DemQuery.\n return isinstance(aggr.value, (L.Name, L.SetMatch))\n" }, { "alpha_fraction": 0.554347813129425, "alphanum_fraction": 0.5579710006713867, "avg_line_length": 12.142857551574707, "blob_id": "83c03ae656c96f1faebd076311241e89a7d9cd43", "content_id": "36df0f9a2f22dff4ee37766135dc55abdf90692a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 276, "license_type": "no_license", "max_line_length": 44, "num_lines": 21, "path": "/incoq/tests/programs/deminc/objwild_in.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "# Basic tag-based demand incrementalization.\n\nfrom incoq.runtime import *\n\nOPTIONS(\n obj_domain = True,\n)\n\nQUERYOPTIONS(\n '{S for _ in S}',\n params = ['S'],\n uset_mode = 'all',\n impl = 'dem',\n)\n\nS = Set()\no = Obj()\no.a = 1\nS.add(o)\n\nprint(len_({S for _ in S}))\n" }, { "alpha_fraction": 0.5029304027557373, "alphanum_fraction": 0.5391941666603088, "avg_line_length": 26.57575798034668, "blob_id": "783504d6540e7fb63ea6318708638b4cce29dba4", "content_id": "e305c025d66e5475c99e982ddfd5a1e364338ad9", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "Python", "length_bytes": 2730, "license_type": "no_license", "max_line_length": 76, "num_lines": 99, "path": "/experiments/cache/run_cachetest.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Invoke valgrind to repeatedly run cachetest.py and report\nLL cache misses.\n\"\"\"\n\nimport pickle\nimport subprocess\nimport re\n\n\ndef run(x):\n results = subprocess.check_output(\n ['valgrind',\n '--tool=cachegrind',\n '--cachegrind-out-file=/dev/null',\n \n # 256KB L2 cache\n '--LL=262144,8,64',\n # 3MB L3 cache\n# '--LL=3145728,12,64',\n # 8MB L3 cache\n# '--LL=8388608,16,64',\n \n 'python3.3',\n 'cachetest.py',\n str(x)],\n universal_newlines=True,\n stderr=subprocess.STDOUT)\n return results\n\n\ndef parse_results(results):\n \"\"\"Parse and return LL stats.\"\"\"\n num_pat = r'(\\d+([,.]\\d+)*)'\n drefs_pat = r'D refs:\\s*' + num_pat\n d1misses_pat = r'D1 misses:\\s*' + num_pat\n d1missrate_pat = r'D1 miss rate:\\s*' + num_pat\n llrefs_pat = r'LL refs:\\s*' + num_pat\n llmisses_pat = r'LL 
misses:\\s*' + num_pat\n llmissrate_pat = r'LL miss rate:\\s*' + num_pat\n \n for line in results.split('\\n'):\n m = re.search(drefs_pat, line)\n if m is not None:\n drefs = m.group(1)\n continue\n m = re.search(d1misses_pat, line)\n if m is not None:\n d1misses = m.group(1)\n continue\n m = re.search(d1missrate_pat, line)\n if m is not None:\n d1missrate = m.group(1)\n continue\n \n m = re.search(llrefs_pat, line)\n if m is not None:\n llrefs = m.group(1)\n continue\n m = re.search(llmisses_pat, line)\n if m is not None:\n llmisses = m.group(1)\n continue\n m = re.search(llmissrate_pat, line)\n if m is not None:\n llmissrate = m.group(1)\n continue\n \n return drefs, d1misses, d1missrate, llrefs, llmisses, llmissrate\n\n\ndef toint(s):\n return int(s.replace(',', ''))\ndef tofloat(s):\n return float(s.replace(',', ''))\n\n\nxs = (list(range(250, 5001, 250)) + \n list(range(6000, 20001, 1000)))\n\ndata = []\n\nfor x in xs:\n res = run(x)\n stats = parse_results(res)\n drefs, d1misses, d1missrate, llrefs, llmisses, llmissrate = stats\n print('x = {:<5} drefs: {:<12} d1misses: {:<12} d1missrate: {:<4}\\n'\n ' llrefs: {:<12} llmisses: {:<12} llmissrate: {:<4}'\n .format(x, drefs, d1misses, d1missrate,\n llrefs, llmisses, llmissrate))\n data.append((toint(drefs), toint(d1misses), tofloat(d1missrate),\n toint(llrefs), toint(llmisses), tofloat(llmissrate)))\n\nprint(xs)\nprint(data)\n\nwith open('cachetest_out.pickle', 'wb') as f:\n pickle.dump(xs, f)\n pickle.dump(data, f)\n print('Wrote out cachetest_out.pickle.')\n" }, { "alpha_fraction": 0.4183673560619354, "alphanum_fraction": 0.48142334818840027, "avg_line_length": 35.066036224365234, "blob_id": "9c3ddd9daa6377d7025fae4376e45fb04af1b5ae", "content_id": "d08495cd7cc6e2b57309b81f74c06fdaa3db3cf5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3822, "license_type": "no_license", "max_line_length": 126, "num_lines": 106, "path": "/incoq/tests/programs/objcomp/expr_out.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from incoq.runtime import *\n# Comp1 := {(s, (o_i + 1)) : (s, o) in _M, (o, o_i) in _F_i}\n# Comp6 := {(s, None) : (s, _) in _M}\n_m_Comp6_out = Map()\ndef _maint__m_Comp6_out_add(_e):\n (v13_1, v13_2) = _e\n if (v13_1 not in _m_Comp6_out):\n _m_Comp6_out[v13_1] = set()\n _m_Comp6_out[v13_1].add(v13_2)\n\ndef _maint__m_Comp6_out_remove(_e):\n (v14_1, v14_2) = _e\n _m_Comp6_out[v14_1].remove(v14_2)\n if (len(_m_Comp6_out[v14_1]) == 0):\n del _m_Comp6_out[v14_1]\n\n_m_Comp1_out = Map()\ndef _maint__m_Comp1_out_add(_e):\n (v11_1, v11_2) = _e\n if (v11_1 not in _m_Comp1_out):\n _m_Comp1_out[v11_1] = set()\n _m_Comp1_out[v11_1].add(v11_2)\n\ndef _maint__m_Comp1_out_remove(_e):\n (v12_1, v12_2) = _e\n _m_Comp1_out[v12_1].remove(v12_2)\n if (len(_m_Comp1_out[v12_1]) == 0):\n del _m_Comp1_out[v12_1]\n\n_m__M_in = Map()\ndef _maint__m__M_in_add(_e):\n (v9_1, v9_2) = _e\n if (v9_2 not in _m__M_in):\n _m__M_in[v9_2] = set()\n _m__M_in[v9_2].add(v9_1)\n\n_m__M_bw = Map()\ndef _maint__m__M_bw_add(_e):\n (v7_1, v7_2) = _e\n if (v7_1 not in _m__M_bw):\n _m__M_bw[v7_1] = RCSet()\n if (() not in _m__M_bw[v7_1]):\n _m__M_bw[v7_1].add(())\n else:\n _m__M_bw[v7_1].incref(())\n\nComp6 = RCSet()\ndef _maint_Comp6__M_add(_e):\n # Iterate {v5_s : (v5_s, _) in deltamatch(_M, 'bw', _e, 1)}\n for v5_s in setmatch(({_e} if ((_m__M_bw[_e[0]] if (_e[0] in _m__M_bw) else RCSet()).getref(()) == 1) else {}), 'uw', ()):\n if ((v5_s, None) not in Comp6):\n Comp6.add((v5_s, None))\n # 
Begin maint _m_Comp6_out after \"Comp6.add((v5_s, None))\"\n _maint__m_Comp6_out_add((v5_s, None))\n # End maint _m_Comp6_out after \"Comp6.add((v5_s, None))\"\n else:\n Comp6.incref((v5_s, None))\n\nComp1 = RCSet()\ndef _maint_Comp1__M_add(_e):\n # Iterate {(v1_s, v1_o, v1_o_i) : (v1_s, v1_o) in deltamatch(_M, 'bb', _e, 1), (v1_o, v1_o_i) in _F_i}\n (v1_s, v1_o) = _e\n if hasattr(v1_o, 'i'):\n v1_o_i = v1_o.i\n if ((v1_s, (v1_o_i + 1)) not in Comp1):\n Comp1.add((v1_s, (v1_o_i + 1)))\n # Begin maint _m_Comp1_out after \"Comp1.add((v1_s, (v1_o_i + 1)))\"\n _maint__m_Comp1_out_add((v1_s, (v1_o_i + 1)))\n # End maint _m_Comp1_out after \"Comp1.add((v1_s, (v1_o_i + 1)))\"\n else:\n Comp1.incref((v1_s, (v1_o_i + 1)))\n\ndef _maint_Comp1__F_i_add(_e):\n # Iterate {(v3_s, v3_o, v3_o_i) : (v3_s, v3_o) in _M, (v3_o, v3_o_i) in deltamatch(_F_i, 'bb', _e, 1)}\n (v3_o, v3_o_i) = _e\n for v3_s in (_m__M_in[v3_o] if (v3_o in _m__M_in) else set()):\n if ((v3_s, (v3_o_i + 1)) not in Comp1):\n Comp1.add((v3_s, (v3_o_i + 1)))\n # Begin maint _m_Comp1_out after \"Comp1.add((v3_s, (v3_o_i + 1)))\"\n _maint__m_Comp1_out_add((v3_s, (v3_o_i + 1)))\n # End maint _m_Comp1_out after \"Comp1.add((v3_s, (v3_o_i + 1)))\"\n else:\n Comp1.incref((v3_s, (v3_o_i + 1)))\n\ns = Set()\nfor i in [1, 2, 3]:\n o = Obj()\n o.i = i\n # Begin maint Comp1 after \"_F_i.add((o, i))\"\n _maint_Comp1__F_i_add((o, i))\n # End maint Comp1 after \"_F_i.add((o, i))\"\n s.add(o)\n # Begin maint _m__M_in after \"_M.add((s, o))\"\n _maint__m__M_in_add((s, o))\n # End maint _m__M_in after \"_M.add((s, o))\"\n # Begin maint _m__M_bw after \"_M.add((s, o))\"\n _maint__m__M_bw_add((s, o))\n # End maint _m__M_bw after \"_M.add((s, o))\"\n # Begin maint Comp6 after \"_M.add((s, o))\"\n _maint_Comp6__M_add((s, o))\n # End maint Comp6 after \"_M.add((s, o))\"\n # Begin maint Comp1 after \"_M.add((s, o))\"\n _maint_Comp1__M_add((s, o))\n # End maint Comp1 after \"_M.add((s, o))\"\nprint(sorted((_m_Comp1_out[s] if (s in _m_Comp1_out) else set())))\nprint((_m_Comp6_out[s] if (s in _m_Comp6_out) else set()))" }, { "alpha_fraction": 0.5680487155914307, "alphanum_fraction": 0.5696884393692017, "avg_line_length": 27.6510066986084, "blob_id": "a260ceb4180265a7b9a710d743cab6fe7d6ef144", "content_id": "a7e345a42e9aceb15d93e673d2d802f536b29d46", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4269, "license_type": "no_license", "max_line_length": 67, "num_lines": 149, "path": "/incoq/compiler/tup/tupclause.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Clauses for tuple relations.\"\"\"\n\n\n__all__ = [\n 'TClause',\n 'TClause_NoTC',\n 'TupClauseFactory_Mixin',\n]\n\n\nfrom simplestruct.type import checktype\nfrom simplestruct import TypedField\n\nimport incoq.compiler.incast as L\nfrom incoq.compiler.set import Mask\nfrom incoq.compiler.comp import (ClauseFactory, Rate, EnumClause,\n vars_from_tuple)\n\nfrom .tuprel import is_trel, get_trel, make_trel, trel_bindmatch\n\n\nclass TClause(EnumClause):\n \n \"\"\"An enumerator over a tuple relation.\"\"\"\n \n tup = TypedField(str)\n \"\"\"Var for tuple object.\"\"\"\n elts = TypedField(str, seq=True)\n \"\"\"Vars for tuple elements.\"\"\"\n \n @property\n def arity(self):\n return len(self.elts)\n \n typecheck = True\n \n # The elements of a tuple are reachable so long as the tuple\n # itself is. 
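(If the pair t = (x, y) appears in _TUP2 as\n    # (t, x, y), then x and y are reachable exactly when t is;\n    # illustrative.) 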
Therefore, we'll set the element positions as\n # constrained and force the tuple object position to get\n # constrained from somewhere else.\n @property\n def con_mask(self):\n return (False,) + tuple(True for _ in self.elts)\n \n @property\n def tagsin_mask(self):\n return (True,) + tuple(False for _ in self.elts)\n \n @property\n def tagsout_mask(self):\n return (False,) + tuple(True for _ in self.elts)\n \n def get_domain_constrs(self, prefix):\n constrs = []\n \n subdoms = [prefix + self.tup + '.' + str(i)\n for i in range(1, len(self.elts) + 1)]\n constr = (prefix + self.tup, tuple(['<T>'] + subdoms))\n constrs.append(constr)\n \n for i, e in enumerate(self.elts, 1):\n constr = (prefix + self.tup + '.' + str(i), prefix + e)\n constrs.append(constr)\n \n return constrs\n \n def get_membership_constrs(self):\n edges = []\n \n for i, e in enumerate(self.elts, 1):\n edges.append((e, self.tup, i))\n \n return tuple(edges)\n \n @classmethod\n def from_AST(cls, node, factory):\n \"\"\"Construct from enumerator of form\n \n (tupvar, elt1, ..., eltn) in _TUPN\n \"\"\"\n checktype(node, L.Enumerator)\n \n lhs = L.get_vartuple(node.target)\n rel = L.get_name(node.iter)\n if not is_trel(rel):\n raise TypeError\n \n tup, *elts = lhs\n arity = get_trel(rel)\n assert arity == len(elts)\n \n return cls(tup, tuple(elts))\n \n def __init__(self, tup, elts):\n assert self.arity >= 2\n self.lhs = (tup,) + tuple(elts)\n self.rel = make_trel(self.arity)\n self.eltvars = vars_from_tuple(self.elts)\n super().__init__(self.lhs, self.rel)\n \n # TODO: Shouldn't rate() and get_code() also allow the special\n # case of going from all bound components to the tuple value\n # composed of these components, in constant time with no auxmap\n # needed?\n \n def rate(self, bindenv):\n mask = Mask.from_vars(self.lhs, bindenv)\n if mask.is_allunbound:\n return Rate.UNRUNNABLE\n elif mask.parts[0] == 'b':\n return Rate.CONSTANT\n return super().rate(bindenv)\n \n def get_determined_vars(self, bindenv):\n if self.tup in bindenv:\n # All elements are determined by the tuple variable.\n return self.eltvars\n else:\n # The tuple variable is determined by the elements.\n return (self.tup,)\n \n def get_code(self, bindenv, body):\n mask = Mask.from_vars(self.lhs, bindenv)\n assert not mask.is_allunbound\n return trel_bindmatch(make_trel(self.arity),\n mask, self.lhs, body,\n typecheck=self.typecheck)\n\nclass TClause_NoTC(TClause):\n \n \"\"\"TClause without type checks in emitted code.\"\"\"\n \n tup = TypedField(str)\n elts = TypedField(str, seq=True)\n \n typecheck = False\n\n\nclass TupClauseFactory_Mixin(ClauseFactory):\n \n \"\"\"Factory that's aware of tuple clauses.\"\"\"\n \n @classmethod\n def get_clause_kinds(cls):\n if cls.typecheck:\n tup_clauses = [TClause]\n else:\n tup_clauses = [TClause_NoTC]\n return tup_clauses + super().get_clause_kinds()\n" }, { "alpha_fraction": 0.5267349481582642, "alphanum_fraction": 0.532423198223114, "avg_line_length": 25.636363983154297, "blob_id": "98744479f45737eb3d8b3361af074c07b723f571", "content_id": "ec7caed289fef972dd7245c413e2ce32e6b25eee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 879, "license_type": "no_license", "max_line_length": 70, "num_lines": 33, "path": "/loc.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Print line count breakdown.\"\"\"\n\nfrom os import chdir\nfrom os.path import dirname\nfrom pathlib import PurePath\n\nfrom incoq.util.linecount import 
get_counts_for_dir\n\n\ndef show(dir, dirs_only=False):\n # All paths relative to this file (top-level dir).\n ex = [\n '.git',\n '.tox',\n '__pycache__',\n ]\n res = get_counts_for_dir(dir, ex)\n for name, count in res:\n p = PurePath(name)\n indent = len(p.parts) - 1\n label = '| ' * (indent - 1) + '|--- ' if indent > 0 else ''\n label += p.name\n number = str(count)\n if name.endswith('.py'):\n number = ' ' + number\n if dirs_only:\n continue\n print('{:40} {}'.format(label, number))\n\nif __name__ == '__main__':\n chdir(dirname(__file__))\n show('incoq', dirs_only=True)\n# show('experiments', dirs_only=True)\n" }, { "alpha_fraction": 0.4262596666812897, "alphanum_fraction": 0.48142698407173157, "avg_line_length": 33.43037796020508, "blob_id": "9e97e94288550cb90ddbb24163dae6e13562e07a", "content_id": "1bd4919666ee291f572abd897a379f99ec41d8f3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2719, "license_type": "no_license", "max_line_length": 106, "num_lines": 79, "path": "/incoq/tests/programs/objcomp/inc_out.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from incoq.runtime import *\n# Comp1 := {(s, o_i) : (s, o) in _M, (o, o_i) in _F_i}\n_m_Comp1_out = Map()\ndef _maint__m_Comp1_out_add(_e):\n (v7_1, v7_2) = _e\n if (v7_1 not in _m_Comp1_out):\n _m_Comp1_out[v7_1] = set()\n _m_Comp1_out[v7_1].add(v7_2)\n\ndef _maint__m_Comp1_out_remove(_e):\n (v8_1, v8_2) = _e\n _m_Comp1_out[v8_1].remove(v8_2)\n if (len(_m_Comp1_out[v8_1]) == 0):\n del _m_Comp1_out[v8_1]\n\n_m__M_in = Map()\ndef _maint__m__M_in_add(_e):\n (v5_1, v5_2) = _e\n if (v5_2 not in _m__M_in):\n _m__M_in[v5_2] = set()\n _m__M_in[v5_2].add(v5_1)\n\nComp1 = RCSet()\ndef _maint_Comp1__M_add(_e):\n # Iterate {(v1_s, v1_o, v1_o_i) : (v1_s, v1_o) in deltamatch(_M, 'bb', _e, 1), (v1_o, v1_o_i) in _F_i}\n (v1_s, v1_o) = _e\n if hasattr(v1_o, 'i'):\n v1_o_i = v1_o.i\n if ((v1_s, v1_o_i) not in Comp1):\n Comp1.add((v1_s, v1_o_i))\n # Begin maint _m_Comp1_out after \"Comp1.add((v1_s, v1_o_i))\"\n _maint__m_Comp1_out_add((v1_s, v1_o_i))\n # End maint _m_Comp1_out after \"Comp1.add((v1_s, v1_o_i))\"\n else:\n Comp1.incref((v1_s, v1_o_i))\n\ndef _maint_Comp1__F_i_add(_e):\n # Iterate {(v3_s, v3_o, v3_o_i) : (v3_s, v3_o) in _M, (v3_o, v3_o_i) in deltamatch(_F_i, 'bb', _e, 1)}\n (v3_o, v3_o_i) = _e\n for v3_s in (_m__M_in[v3_o] if (v3_o in _m__M_in) else set()):\n if ((v3_s, v3_o_i) not in Comp1):\n Comp1.add((v3_s, v3_o_i))\n # Begin maint _m_Comp1_out after \"Comp1.add((v3_s, v3_o_i))\"\n _maint__m_Comp1_out_add((v3_s, v3_o_i))\n # End maint _m_Comp1_out after \"Comp1.add((v3_s, v3_o_i))\"\n else:\n Comp1.incref((v3_s, v3_o_i))\n\nN = Set()\nfor i in range(1, 5):\n N._add(i)\ns1 = Set()\ns2 = Set()\nfor i in N:\n o = Obj()\n o.i = i\n # Begin maint Comp1 after \"_F_i.add((o, i))\"\n _maint_Comp1__F_i_add((o, i))\n # End maint Comp1 after \"_F_i.add((o, i))\"\n if (i % 2):\n s1.add(o)\n # Begin maint _m__M_in after \"_M.add((s1, o))\"\n _maint__m__M_in_add((s1, o))\n # End maint _m__M_in after \"_M.add((s1, o))\"\n # Begin maint Comp1 after \"_M.add((s1, o))\"\n _maint_Comp1__M_add((s1, o))\n # End maint Comp1 after \"_M.add((s1, o))\"\n else:\n s2.add(o)\n # Begin maint _m__M_in after \"_M.add((s2, o))\"\n _maint__m__M_in_add((s2, o))\n # End maint _m__M_in after \"_M.add((s2, o))\"\n # Begin maint Comp1 after \"_M.add((s2, o))\"\n _maint_Comp1__M_add((s2, o))\n # End maint Comp1 after \"_M.add((s2, o))\"\ns = s1\nprint(sorted((_m_Comp1_out[s] if (s in 
_m_Comp1_out) else set())))\ns = s2\nprint(sorted((_m_Comp1_out[s] if (s in _m_Comp1_out) else set())))" }, { "alpha_fraction": 0.47927165031433105, "alphanum_fraction": 0.480416864156723, "avg_line_length": 35.53556442260742, "blob_id": "644445b9b98ec6e709b77ed5d5eb3dc7328f5c03", "content_id": "3dcf3506672a99e1226325104ab482fe73badcd5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8732, "license_type": "no_license", "max_line_length": 75, "num_lines": 239, "path": "/incoq/compiler/incast/macros.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Macro processors for expanding one-line operations to several\nlines of code.\n\"\"\"\n\n\n__all__ = [\n 'IncMacroProcessor',\n]\n\n\nfrom .nodes import Str\nfrom .structconv import parse_structast, astargs\nfrom .helpers import tuplify, ln, sn\nfrom .nodeconv import IncLangImporter\n\n\nclass IncMacroProcessor(IncLangImporter):\n \n \"\"\"Expand macro operations.\"\"\"\n \n def pc(self, source, *, mode=None, **kargs):\n \"\"\"Helper, allows macros to be defined in terms of other macros.\"\"\"\n tree = parse_structast(source, mode='code', **kargs)\n return self.run(tree)\n \n # Set macros.\n \n def handle_ms_nsadd(self, f, target, elem):\n return self.pc('''\n if ELEM not in TARGET:\n TARGET.add(ELEM)\n ''', subst={'TARGET': target, 'ELEM': elem})\n \n def handle_ms_nsremove(self, f, target, elem):\n return self.pc('''\n if ELEM in TARGET:\n TARGET.remove(ELEM)\n ''', subst={'TARGET': target, 'ELEM': elem})\n \n def handle_ms_rcadd(self, f, target, elem):\n return self.pc('''\n if ELEM not in TARGET:\n TARGET.add(ELEM)\n else:\n TARGET.incref(ELEM)\n ''', subst={'TARGET': target, 'ELEM': elem})\n \n def handle_ms_rcremove(self, f, target, elem):\n return self.pc('''\n if TARGET.getref(ELEM) == 1:\n TARGET.remove(ELEM)\n else:\n TARGET.decref(ELEM)\n ''', subst={'TARGET': target, 'ELEM': elem})\n \n # Obj macros.\n \n @astargs\n def handle_ms_nsassignfield(self, f, target, field:'Name', value):\n return self.pc('''\n TARGET.nsdelfield(FIELD)\n TARGET.ATTR_FIELD = VALUE\n ''', subst={'TARGET': target,\n 'FIELD': ln(field),\n '@ATTR_FIELD': field,\n 'VALUE': value})\n \n @astargs\n def handle_ms_nsdelfield(self, f, target, field:'Name'):\n return self.pc('''\n if hasattr(TARGET, STR_FIELD):\n del TARGET.ATTR_FIELD\n ''', subst={'TARGET': target,\n 'STR_FIELD': Str(field),\n '@ATTR_FIELD': field})\n \n # Map macros.\n \n def handle_ms_nsassignkey(self, f, target, key, value):\n return self.pc('''\n TARGET.nsdelkey(KEY)\n TARGET.assignkey(KEY, VALUE)\n ''', subst={'TARGET': target, 'KEY': key, 'VALUE': value})\n \n def handle_ms_nsdelkey(self, f, target, key):\n return self.pc('''\n if KEY in TARGET:\n TARGET.delkey(KEY)\n ''', subst={'TARGET': target, 'KEY': key})\n \n def handle_ms_imgadd(self, f, target, key, elem):\n return self.pc('''\n if KEY not in TARGET:\n TARGET.assignkey(KEY, set())\n TARGET[KEY].add(ELEM)\n ''', subst={'TARGET': target, 'KEY': key, 'ELEM': elem})\n \n def handle_ms_imgremove(self, f, target, key, elem):\n return self.pc('''\n TARGET[KEY].remove(ELEM)\n if TARGET[KEY].isempty():\n TARGET.delkey(KEY)\n ''', subst={'TARGET': target, 'KEY': key, 'ELEM': elem})\n \n def handle_ms_nsimgadd(self, f, target, key, elem):\n return self.pc('''\n if KEY not in TARGET:\n TARGET.assignkey(KEY, set())\n TARGET[KEY].nsadd(ELEM)\n ''', subst={'TARGET': target, 'KEY': key, 'ELEM': elem})\n \n def handle_ms_nsimgremove(self, f, target, key, elem):\n return 
self.pc('''\n if KEY in TARGET:\n if ELEM in TARGET[KEY]:\n TARGET[KEY].remove(ELEM)\n if TARGET[KEY].isempty():\n TARGET.delkey(KEY)\n ''', subst={'TARGET': target, 'KEY': key, 'ELEM': elem})\n \n def handle_ms_rcimgadd(self, f, target, key, elem):\n return self.pc('''\n if KEY not in TARGET:\n TARGET.assignkey(KEY, RCSet())\n TARGET[KEY].rcadd(ELEM)\n ''', subst={'TARGET': target, 'KEY': key, 'ELEM': elem})\n \n def handle_ms_rcimgremove(self, f, target, key, elem):\n return self.pc('''\n TARGET[KEY].rcremove(ELEM)\n if TARGET[KEY].isempty():\n TARGET.delkey(KEY)\n ''', subst={'TARGET': target, 'KEY': key, 'ELEM': elem})\n \n # Setmap macros.\n \n @astargs\n def handle_ms_smassignkey(self, f, target, maskstr:'Str', key, elem,\n prefix:'Str'):\n from incoq.compiler.set import Mask\n mask = Mask(maskstr)\n assert mask.is_keymask\n # vars for each bound component ((len(mask) - 1) many).\n vars = [prefix + str(i) for i in range(1, len(mask))]\n return self.pc('''\n S_VARS = KEY\n TARGET.add(PARTS)\n ''', subst={'S_VARS': tuplify(vars, lval=True),\n 'KEY': key,\n 'TARGET': target,\n 'PARTS': tuplify(vars + [elem])})\n \n @astargs\n def handle_ms_smdelkey(self, f, target, maskstr:'Str', key,\n prefix:'Str'):\n from incoq.compiler.set import Mask\n mask = Mask(maskstr)\n assert mask.is_keymask\n # vars for each bound component ((len(mask) - 1) many).\n vars = [prefix + str(i) for i in range(1, len(mask))]\n # var for element component\n evar = prefix + 'elem'\n return self.pc('''\n S_VARS = KEY\n S_EVAR = TARGET.smlookup(MASK, KEY)\n TARGET.remove(PARTS)\n ''', subst={'S_VARS': tuplify(vars, lval=True),\n 'KEY': key,\n 'S_EVAR': evar,\n 'MASK': Str(maskstr),\n 'TARGET': target,\n 'PARTS': tuplify(vars + [evar])})\n \n @astargs\n def handle_ms_smnsassignkey(self, f, target, maskstr:'Str', key, elem,\n prefix:'Str'):\n from incoq.compiler.set import Mask\n mask = Mask(maskstr)\n assert mask.is_keymask\n # vars for each bound component ((len(mask) - 1) many).\n vars = [prefix + str(i) for i in range(1, len(mask))]\n # var for element component\n evar = prefix + 'elem'\n return self.pc('''\n S_VARS = KEY\n if not setmatch(TARGET, MASK, KEY).isempty():\n S_EVAR = TARGET.smlookup(MASK, KEY)\n TARGET.remove(PARTS_OLD)\n TARGET.add(PARTS_NEW)\n ''', subst={'S_VARS': tuplify(vars, lval=True),\n 'KEY': key,\n 'TARGET': target,\n 'MASK': Str(maskstr),\n 'S_EVAR': evar,\n 'PARTS_OLD': tuplify(vars + [evar]),\n 'PARTS_NEW': tuplify(vars + [elem])})\n \n @astargs\n def handle_ms_smnsdelkey(self, f, target, maskstr:'Str', key,\n prefix:'Str'):\n from incoq.compiler.set import Mask\n mask = Mask(maskstr)\n assert mask.is_keymask\n # vars for each bound component ((len(mask) - 1) many).\n vars = [prefix + str(i) for i in range(1, len(mask))]\n # var for element component\n evar = prefix + 'elem'\n return self.pc('''\n if not setmatch(TARGET, MASK, KEY).isempty():\n S_VARS = KEY\n S_EVAR = TARGET.smlookup(MASK, KEY)\n TARGET.remove(PARTS)\n ''', subst={'TARGET': target,\n 'MASK': Str(maskstr),\n 'KEY': key,\n 'S_VARS': tuplify(vars, lval=True),\n 'S_EVAR': evar,\n 'PARTS': tuplify(vars + [evar])})\n \n @astargs\n def handle_ms_smreassignkey(self, f, target, maskstr:'Str', key, elem,\n prefix:'Str'):\n from incoq.compiler.set import Mask\n mask = Mask(maskstr)\n assert mask.is_keymask\n vars = [prefix + str(i) for i in range(1, len(mask))]\n evar = prefix + 'elem'\n return self.pc('''\n S_VARS = KEY\n S_EVAR = TARGET.smlookup(MASK, KEY)\n TARGET.remove(OLD_PARTS)\n TARGET.add(NEW_PARTS)\n ''', subst={'S_VARS': 
tuplify(vars, lval=True),\n 'KEY': key,\n 'S_EVAR': sn(evar),\n 'TARGET': target,\n 'MASK': Str(maskstr),\n 'OLD_PARTS': tuplify(vars + [evar]),\n 'NEW_PARTS': tuplify(vars + [elem])})\n" }, { "alpha_fraction": 0.5974025726318359, "alphanum_fraction": 0.5991342067718506, "avg_line_length": 19.625, "blob_id": "d6d1fe18605d8c4b5bd499eadd46fa26d7bd154a", "content_id": "34f6a783a626c4343b2b7a14a1b459a6d32644f3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1155, "license_type": "no_license", "max_line_length": 85, "num_lines": 56, "path": "/experiments/twitter/twitter_in.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "# Twitter example.\n\n# Celebs to their set of followers is 1-to-1, but this\n# fact doesn't really change the join orders.\n\nfrom incoq.runtime import *\n\nOPTIONS(\n obj_domain = True,\n)\n\nQUERYOPTIONS(\n '{user.email for user in celeb.followers if user in group if user.loc == \"NYC\"}',\n params = ['celeb', 'group'],\n uset_mode = 'all',\n no_rc = True,\n)\n\ndef make_user(email, loc):\n u = Obj()\n u.followers = Set()\n u.email = email\n u.loc = loc\n return u\n\ndef make_group():\n g = Set()\n return g\n\ndef follow(u, c):\n assert u not in c.followers\n c.followers.add(u)\n\ndef unfollow(u, c):\n assert u in c.followers\n c.followers.remove(u)\n\ndef join_group(u, g):\n assert u not in g\n g.add(u)\n\ndef leave_group(u, g):\n assert u in g\n g.remove(u)\n\ndef change_loc(u, loc):\n del u.loc\n u.loc = loc\n\ndef do_query(celeb, group):\n return {user.email for user in celeb.followers if user in group\n if user.loc == 'NYC'}\n\ndef do_query_nodemand(celeb, group):\n return NODEMAND({user.email for user in celeb.followers if user in group\n if user.loc == 'NYC'})\n" }, { "alpha_fraction": 0.4625000059604645, "alphanum_fraction": 0.48750001192092896, "avg_line_length": 13.117647171020508, "blob_id": "f4685d90655e13cabb1751e00da0794cf7637418", "content_id": "a7a8bf73cadce9967ec497b5f9373b26080e1544", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 240, "license_type": "no_license", "max_line_length": 60, "num_lines": 17, "path": "/incoq/tests/programs/aggr/params_in.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "# Aggregate of a lookup.\n\nfrom incoq.runtime import *\n\nOPTIONS(\n default_impl = 'inc',\n)\n\nR = Set()\n\nfor x in [('A', 1), ('A', 2), ('A', 3), ('B', 4), ('B', 5)]:\n R.add(x)\n\nR.remove(('B', 5))\n\nk = 'A'\nprint(sum(setmatch(R, 'bu', k)))\n" }, { "alpha_fraction": 0.4265364408493042, "alphanum_fraction": 0.42714694142341614, "avg_line_length": 31.760000228881836, "blob_id": "4bbd8a367a4392eede3ebf8058afe85a705f1796", "content_id": "27f733c75dba1bf00f88ce631d680a00561fd21e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4914, "license_type": "no_license", "max_line_length": 78, "num_lines": 150, "path": "/incoq/tests/invinc/cost/test_analyze.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Unit tests for analyze.py.\"\"\"\n\n\nimport unittest\n\nimport incoq.compiler.incast as L\nfrom incoq.compiler.set import Mask\nfrom incoq.compiler.central import CentralCase\n\nfrom incoq.compiler.cost.cost import *\nfrom incoq.compiler.cost.analyze import *\nfrom incoq.compiler.cost.analyze import (CostAnalyzer, func_costs,\n type_to_cost, VarRewriter)\n\n\nclass AnalyzeCase(CentralCase):\n \n def 
test_intraproc_simple(self):\n tree = L.pc('''\n for x in S:\n for y in T:\n foo\n for z in R:\n bar\n ''')\n cost = CostAnalyzer.run(tree, (), {}, {})\n exp_cost_str = '((S*T) + R)'\n self.assertEqual(str(cost), exp_cost_str)\n \n tree = L.pc('''\n if True:\n for x in S:\n for y in T:\n foo\n baz\n for w in T:\n pass\n else:\n for q in A:\n baz()\n for z in S:\n bar\n ''')\n cost = CostAnalyzer.run(tree, (), {}, {})\n exp_cost_str = '((S*T) + (A*UNKNOWN_baz))'\n self.assertEqual(str(cost), exp_cost_str)\n \n tree = L.pc('''\n while True:\n for x in T:\n pass\n for z in S:\n bar\n ''')\n cost = CostAnalyzer.run(tree, (), {}, {})\n exp_cost_str = '((?*T) + S)'\n self.assertEqual(str(cost), exp_cost_str)\n \n def test_intraproc_setmatch(self):\n tree = L.pc('''\n result = set()\n for y in setmatch(R, 'bu', x):\n for z in setmatch(S, 'bu', y):\n if (z not in result):\n result.add(z)\n return result\n ''')\n cost = CostAnalyzer.run(tree, ('x'), {}, {})\n exp_cost_str = '(R_out[x]*S_out)'\n self.assertEqual(str(cost), exp_cost_str)\n \n def test_interproc_simple(self):\n tree = L.pc('''\n def f():\n for x in S:\n for y in T:\n foo\n def g():\n for z in R:\n f()\n ''')\n costs = func_costs(tree)\n exp_cost_strs = {'f': '(S*T)',\n 'g': '(R*S*T)'}\n self.assertEqual({k: str(v) for k, v in costs.items()}, exp_cost_strs)\n \n def test_interproc_setmatch(self):\n tree = L.pc('''\n def f(x, y):\n for a in setmatch(R, 'bu', x):\n for b in setmatch(S, 'bu', y):\n for c in setmatch(T, 'bu', z):\n foo\n def g(u):\n for v in Z:\n f(u, v)\n ''')\n costs = func_costs(tree)\n exp_cost_strs = {'f': '(R_out[x]*S_out[y]*T_out)',\n 'g': '(Z*R_out[u]*S_out*T_out)'}\n self.assertEqual({k: str(v) for k, v in costs.items()}, exp_cost_strs)\n \n def test_costlabel(self):\n tree = L.pc('''\n def f(x):\n for y in setmatch(R, 'bu', x):\n pass\n ''')\n costmap = func_costs(tree)\n tree = CostLabelAdder.run(tree, costmap)\n exp_tree = L.pc('''\n def f(x):\n Comment('Cost: O(R_out[x])')\n for y in setmatch(R, 'bu', x):\n pass\n ''')\n self.assertEqual(tree, exp_tree)\n \n def test_typetocost(self):\n ST, TT, OT = L.SetType, L.TupleType, L.ObjType\n t = TT([OT('A'), OT('B'), TT([OT('C'), OT('D')])])\n cost = type_to_cost(t)\n exp_cost = ProductCost([NameCost('A'), NameCost('B'),\n ProductCost([NameCost('C'), NameCost('D')])])\n self.assertEqual(cost, exp_cost)\n \n cost = type_to_cost(t, {(1,): UnitCost(), (2, 1): UnitCost()})\n exp_cost = ProductCost([NameCost('A'), UnitCost(),\n ProductCost([NameCost('C'), UnitCost()])])\n self.assertEqual(cost, exp_cost)\n \n def test_VarRewriter(self):\n ST, TT, OT = L.SetType, L.TupleType, L.ObjType\n t = ST(TT([OT('A'), OT('B'), TT([OT('C'), OT('D')])]))\n self.manager.vartypes['R'] = t\n \n cost = NameCost('R')\n cost = VarRewriter.run(cost, self.manager)\n exp_cost = ProductCost([NameCost('A'), NameCost('B'),\n NameCost('C'), NameCost('D')])\n self.assertEqual(cost, exp_cost)\n \n cost = IndefImgsetCost('R', Mask('buu'))\n cost = VarRewriter.run(cost, self.manager)\n exp_cost = ProductCost([NameCost('B'), NameCost('C'), NameCost('D')])\n self.assertEqual(cost, exp_cost)\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.47589099407196045, "alphanum_fraction": 0.5094339847564697, "avg_line_length": 16.035715103149414, "blob_id": "2de4c922be183af1c7883e9dcc1974d7b84b6fc5", "content_id": "ea76354dc66efaa63f9232f44186fed054727fb6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 477, "license_type": 
"no_license", "max_line_length": 67, "num_lines": 28, "path": "/incoq/tests/programs/comp/inline_in.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "# Inline comp maintenance.\n\nfrom incoq.runtime import *\n\nOPTIONS(\n maint_inline = True,\n)\n\nQUERYOPTIONS(\n '{x for (x, y) in E if f(y)}',\n impl = 'inc',\n)\nQUERYOPTIONS(\n '{(x, z) for (x, y) in E for (y2, z) in E if y == y2}',\n impl = 'inc',\n)\n\ndef f(y):\n return True\n\nE = Set()\n\nfor v1, v2 in {(1, 2), (1, 3), (2, 3), (3, 4)}:\n E.add((v1, v2))\n\nprint(sorted({x for (x, y) in E if f(y)}))\n\nprint(sorted({(x, z) for (x, y) in E for (y2, z) in E if y == y2}))\n" }, { "alpha_fraction": 0.4006849229335785, "alphanum_fraction": 0.4071537256240845, "avg_line_length": 31.850000381469727, "blob_id": "66a6063bd027347de13bbe04e811c13daea735c2", "content_id": "c46d10ce981d4ca4bba9190fdfa4a19af52c50bc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15768, "license_type": "no_license", "max_line_length": 78, "num_lines": 480, "path": "/incoq/tests/invinc/incast/test_util.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Unit tests for util.py.\"\"\"\n\n\nimport unittest\n\nfrom incoq.util.collections import OrderedSet\nfrom incoq.compiler.incast.nodes import *\nfrom incoq.compiler.incast.structconv import parse_structast, NodeTransformer\nfrom incoq.compiler.incast.nodeconv import IncLangImporter\nfrom incoq.compiler.incast.util import *\nfrom incoq.compiler.incast.util import FuncDefLister\nfrom incoq.compiler.incast import ts\n\n\nclass UtilCase(unittest.TestCase):\n \n def p(self, source, subst=None, mode=None):\n return IncLangImporter.run(\n parse_structast(source, mode=mode, subst=subst))\n \n def pc(self, source, **kargs):\n return self.p(source, mode='code', **kargs)\n \n def ps(self, source, **kargs):\n return self.p(source, mode='stmt', **kargs)\n \n def pe(self, source, **kargs):\n return self.p(source, mode='expr', **kargs)\n \n def test_usedvars(self):\n tree = self.p('''\n a = b\n b = c\n d = e + f\n R.add(1)\n {() for a in b}\n ''')\n vars = VarsFinder.run(tree)\n exp_vars = ['a', 'b', 'c', 'd', 'e', 'f', 'R']\n self.assertSequenceEqual(vars, exp_vars)\n \n # Ignore store.\n vars = VarsFinder.run(tree, ignore_store=True)\n self.assertSequenceEqual(vars, ['a', 'b', 'c', 'e', 'f'])\n \n # Ignore functions.\n tree = self.p('''\n def f(a, b):\n c\n if f(d):\n pass\n ''')\n names = VarsFinder.run(tree, ignore_functions=False)\n vars = VarsFinder.run(tree, ignore_functions=True)\n self.assertCountEqual(names, ['c', 'd', 'f'])\n self.assertCountEqual(vars, ['c', 'd'])\n \n # Ignore relations.\n tree = self.p('''\n COMP({x for x in S if sum(R) > 0\n if sum(setmatch(T, 'bu', 1)) > 0\n if y in Z},\n [], {})\n ''')\n names1 = VarsFinder.run(tree, ignore_rels=False)\n names2 = VarsFinder.run(tree, ignore_rels=True)\n self.assertCountEqual(names1, ['x', 'S', 'R', 'T', 'y', 'Z'])\n self.assertCountEqual(names2, ['x', 'y'])\n \n def test_varrenamer(self):\n tree = self.p('''\n (a, b, c) = (a, b, c)\n ''')\n nameit = iter(['b1', 'b2'])\n subst = {'a': 'a1', 'b': lambda n: next(nameit)}\n tree = VarRenamer.run(tree, subst)\n exp_tree = self.p('''\n (a1, b1, c) = (a1, b2, c)\n ''')\n self.assertEqual(tree, exp_tree)\n \n def test_scope(self):\n os = OrderedSet\n \n # Ensure that scope stack info is returned correctly.\n \n class FooScoper(ScopeVisitor):\n def process(self, tree):\n self.res = []\n super().process(tree)\n return 
self.res\n def visit_Name(self, node):\n super().visit_Name(node)\n if node.id == 'X':\n self.res.append(list(self._scope_stack))\n \n tree = self.p('''\n def f(p):\n X\n for R in f(1):\n if R:\n X\n {a for a in R if len({X for a in a}) > 0}\n ''')\n res = FooScoper.run(tree)\n res_flat = [ScopeVisitor.bvars_from_scopestack(sc)\n for sc in res]\n \n exp_res = [\n [os(['f', 'R']), os(['p'])],\n [os(['f', 'R'])],\n [os(['f', 'R']), os(['a']), os(['a'])],\n ]\n exp_res_flat = [\n os(['f', 'R', 'p']),\n os(['f', 'R']),\n os(['f', 'R', 'a']),\n ]\n \n self.assertEqual(res, exp_res)\n self.assertEqual(res_flat, exp_res_flat)\n \n # Ensure the order of traversal is correct.\n \n class FooScoper(ScopeVisitor):\n def process(self, tree):\n self.res = {}\n super().process(tree)\n return self.res\n def visit_Name(self, node):\n if isinstance(node.ctx, Store):\n self.res[node.id] = self.current_bvars()\n super().visit_Name(node)\n \n tree = self.p('''\n a = 1\n for d in {c2 for c1 in R for c2 in {b for b in a}}:\n e = 2\n ''')\n res = FooScoper.run(tree)\n \n exp_res = {\n 'a': os([]),\n 'c1': os(['a']),\n 'b': os(['a', 'c1']),\n 'c2': os(['a', 'c1']),\n 'd': os(['a']),\n 'e': os(['a', 'd']),\n }\n \n self.assertEqual(res, exp_res)\n \n def test_prefix_names(self):\n tree = self.p('a = b + c')\n tree = prefix_names(tree, ['a', 'b'], 'p')\n \n exp_tree = self.p('pa = pb + c')\n \n self.assertEqual(tree, exp_tree)\n \n def test_name_counter(self):\n namegen = NameGenerator(fmt='a{}', counter=100)\n self.assertEqual(namegen.peek(), 'a100')\n self.assertEqual(namegen.peek(), 'a100')\n self.assertEqual(namegen.next(), 'a100')\n self.assertEqual(namegen.next_prefix(), 'a101_')\n self.assertEqual(next(namegen), 'a102')\n \n tree = self.p('a = b + c')\n tree = namegen.prefix_names(tree, {'a', 'b'})\n \n exp_tree = self.p('a103_a = a103_b + c')\n \n self.assertEqual(tree, exp_tree)\n \n def test_injective(self):\n tree = self.pe('a')\n self.assertTrue(is_injective(tree))\n tree = self.pe('a + b')\n self.assertFalse(is_injective(tree))\n tree = self.pe('(a, (b, c))')\n self.assertTrue(is_injective(tree))\n tree = self.pe('(a, (b, (c + d)))')\n self.assertFalse(is_injective(tree))\n \n def test_query_replacer(self):\n q1 = Aggregate(self.pe('a'), 'count', {})\n q2 = Aggregate(self.pe('a'), 'sum', {})\n \n tree = Expr(q1)\n tree = QueryReplacer.run(tree, q1, q2)\n exp_tree = Expr(q2)\n self.assertEqual(tree, exp_tree)\n \n def test_query_mapper(self):\n q1 = Aggregate(self.p('a'), 'count', {})\n q2 = Aggregate(self.p('a'), 'sum', {})\n \n class Foo(QueryMapper):\n def map_Aggregate(self, query):\n return query._replace(op='sum')\n \n tree = Expr(q1)\n tree = Foo.run(tree)\n exp_tree = Expr(q2)\n self.assertEqual(tree, exp_tree)\n \n def test_StmtTransformer(self):\n _self = self\n \n class Foo(StmtTransformer):\n def visit_arg(self, node):\n new_code = _self.pc('print(x)', subst={'x': node.arg})\n self.pre_stmts.extend(new_code)\n def visit_Name(self, node):\n new_code = _self.pc('print(x)', subst={'x': node.id})\n self.pre_stmts.extend(new_code)\n \n tree = self.p('''\n def f(x):\n g(x)\n ''')\n tree = Foo.run(tree)\n \n exp_tree = self.p('''\n print(x)\n def f(x):\n print(g)\n print(x)\n g(x)\n ''')\n \n self.assertEqual(tree, exp_tree)\n \n def test_OuterMaintTransformer(self):\n _self = self\n \n class Foo(OuterMaintTransformer):\n def visit_SetUpdate(self, node):\n id = node.target.id\n name = 'Q_' + id\n desc = ts(node)\n precode = _self.pc('print(N)', subst={'N': id + '_pre'})\n postcode = 
_self.pc('print(N)', subst={'N': id + '_post'})\n return self.with_outer_maint(node, name, desc,\n precode, postcode)\n \n in_tree = self.p('''\n with MAINT(T, 'after', 'S.add(1)'):\n with MAINT(P, 'after', 'S.add(1)'):\n S.add(1)\n R.add(2)\n ''')\n tree = Foo.run(in_tree, ['P', 'T'])\n \n exp_tree = Module(\n (Maintenance(\n 'Q_S', 'S.add(1)',\n self.pc('print(S_pre)'),\n (Maintenance(\n 'T', 'S.add(1)',\n (),\n (Maintenance(\n 'P', 'S.add(1)',\n (),\n self.pc('S.add(1)'),\n (Maintenance(\n 'Q_R', 'R.add(2)',\n self.pc('print(R_pre)'),\n self.pc('R.add(2)'),\n self.pc('print(R_post)')\n ),)\n ),),\n ()\n ),),\n self.pc('print(S_post)'),\n ),))\n \n self.assertEqual(tree, exp_tree)\n \n tree = Foo.run(in_tree, ['P'])\n \n exp_tree = Module(\n (Maintenance(\n 'T', 'S.add(1)',\n (),\n (Maintenance(\n 'Q_S', 'S.add(1)',\n self.pc('print(S_pre)'),\n (Maintenance(\n 'P', 'S.add(1)',\n (),\n self.pc('S.add(1)'),\n (Maintenance(\n 'Q_R', 'R.add(2)',\n self.pc('print(R_pre)'),\n self.pc('R.add(2)'),\n self.pc('print(R_post)')\n ),)\n ),),\n self.pc('print(S_post)'),\n ),),\n ()\n ),))\n \n self.assertEqual(tree, exp_tree)\n \n def test_rewrite_compclauses(self):\n class FooRewriter(NodeTransformer):\n def process(self, tree):\n self.new_clauses = []\n tree = super().process(tree)\n return tree, self.new_clauses\n \n def visit_Name(self, node):\n if node.id in ['x', 'y', 'z']:\n newcl = Enumerator(Name('_' + node.id, Store()),\n Name('R', Load()))\n self.new_clauses.append(newcl)\n return node._replace(id='_' + node.id)\n \n orig_comp = self.pe('COMP({x + z for x in S if y > 1}, [])')\n \n comp = rewrite_compclauses(orig_comp, FooRewriter.run)\n exp_comp = self.pe(\n 'COMP({_x + _z for _x in R for _x in S '\n 'for _y in R if _y > 1 '\n 'for _x in R for _z in R},'\n '[])')\n self.assertEqual(comp, exp_comp)\n \n comp = rewrite_compclauses(orig_comp, FooRewriter.run, after=True)\n exp_comp = self.pe(\n 'COMP({_x + _z for _x in S for _x in R '\n 'if _y > 1 for _y in R '\n 'for _x in R for _z in R},'\n '[])')\n self.assertEqual(comp, exp_comp)\n \n comp = rewrite_compclauses(orig_comp, FooRewriter.run, enum_only=True)\n exp_comp = self.pe(\n 'COMP({x + z for _x in R for _x in S '\n 'if y > 1},'\n '[])')\n self.assertEqual(comp, exp_comp)\n \n def test_rewrite_compclauses_recursive(self):\n class FooRewriter(NodeTransformer):\n def process(self, tree):\n self.new_clauses = []\n tree = super().process(tree)\n return tree, self.new_clauses\n \n def visit_Name(self, node):\n if node.id not in ['x', 'y']:\n return node\n newid = {'x': 'y', 'y': 'z'}[node.id]\n newcl = Enumerator(Name(newid, Store()),\n Name('R', Load()))\n self.new_clauses.append(newcl)\n return node._replace(id=(node.id + node.id))\n \n orig_comp = self.pe('COMP({x for x in S}, [])')\n \n comp = rewrite_compclauses(orig_comp, FooRewriter.run,\n enum_only=True, recursive=True)\n exp_comp = self.pe(\n 'COMP({x for z in R for yy in R for xx in S},'\n '[])')\n self.assertEqual(comp, exp_comp)\n \n def test_skeletonize(self):\n tree = self.p('''\n with MAINT(Q, 'after', 'S.add(1)'):\n S.add(1)\n a = 1\n for x in R:\n with MAINT(Q, 'before', 'T.add(2)'):\n c = 3\n T.add(2)\n b = 2\n ''')\n tree = maint_skeleton(tree)\n exp_tree = self.p('''\n with MAINT(Q, 'after', 'S.add(1)'):\n S.add(1)\n with MAINT(Q, 'before', 'T.add(2)'):\n pass\n T.add(2)\n ''')\n self.assertEqual(tree, exp_tree)\n \n def test_funcdeffinderlister(self):\n tree = self.p('''\n def f(a, b):\n print((a, b))\n def g(c):\n pass\n def g(d):\n pass\n ''')\n with 
self.assertRaises(AssertionError):\n FuncDefLister.run(tree, lambda n: True)\n \n f = FuncDefLister.run(tree, lambda n: n in ['f'])['f']\n exp_f = self.ps('def f(a, b): print((a, b))')\n self.assertEqual(f, exp_f)\n \n def test_demfuncmaker(self):\n maker = DemfuncMaker('Q', 'Qdesc', ('x', 'y'), None)\n \n code = maker.make_usetvars()\n exp_code = self.pc('''\n _U_Q = RCSet()\n _UEXT_Q = Set()\n ''')\n self.assertEqual(code, exp_code)\n \n code = maker.make_demfunc()\n exp_code = self.pc('''\n def demand_Q(x, y):\n 'Qdesc'\n if ((x, y) not in _U_Q):\n _U_Q.add((x, y))\n else:\n _U_Q.incref((x, y))\n ''')\n self.assertEqual(code, exp_code)\n \n code = maker.make_undemfunc()\n exp_code = self.pc('''\n def undemand_Q(x, y):\n 'Qdesc'\n if (_U_Q.getref((x, y)) == 1):\n _U_Q.remove((x, y))\n else:\n _U_Q.decref((x, y))\n ''')\n self.assertEqual(code, exp_code)\n \n code = maker.make_queryfunc()\n exp_code = self.pc('''\n def query_Q(x, y):\n 'Qdesc'\n if ((x, y) not in _UEXT_Q):\n _UEXT_Q.add((x, y))\n demand_Q(x, y)\n return True\n ''')\n self.assertEqual(code, exp_code)\n \n maker = DemfuncMaker('Q', 'Qdesc', ('x', 'y'), 100)\n \n code = maker.make_usetvars()\n exp_code = self.pc('''\n _U_Q = RCSet()\n _UEXT_Q = LRUSet()\n ''')\n self.assertEqual(code, exp_code)\n \n code = maker.make_queryfunc()\n exp_code = self.pc('''\n def query_Q(x, y):\n 'Qdesc'\n if ((x, y) not in _UEXT_Q):\n while (len(_UEXT_Q) >= 100):\n _top_v1, _top_v2 = _top = _UEXT_Q.peek()\n undemand_Q(_top_v1, _top_v2)\n _UEXT_Q.remove(_top)\n _UEXT_Q.add((x, y))\n demand_Q(x, y)\n else:\n _UEXT_Q.ping((x, y))\n return True\n ''')\n self.assertEqual(code, exp_code)\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.44730237126350403, "alphanum_fraction": 0.5178795456886292, "avg_line_length": 36.51764678955078, "blob_id": "03f0b6af90a0de438796e72ebb5b77bff10e2d6a", "content_id": "4009783d116cda000258b3939c691aaffe27fa7e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3188, "license_type": "no_license", "max_line_length": 104, "num_lines": 85, "path": "/incoq/tests/programs/comp/uset/uset_explicit_out.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from incoq.runtime import *\n# Comp1 := {(a, b, c) : a in _U_Comp1, (a, b, c) in R}\n_m_Comp1_bbu = Map()\ndef _maint__m_Comp1_bbu_add(_e):\n (v7_1, v7_2, v7_3) = _e\n if ((v7_1, v7_2) not in _m_Comp1_bbu):\n _m_Comp1_bbu[(v7_1, v7_2)] = set()\n _m_Comp1_bbu[(v7_1, v7_2)].add(v7_3)\n\ndef _maint__m_Comp1_bbu_remove(_e):\n (v8_1, v8_2, v8_3) = _e\n _m_Comp1_bbu[(v8_1, v8_2)].remove(v8_3)\n if (len(_m_Comp1_bbu[(v8_1, v8_2)]) == 0):\n del _m_Comp1_bbu[(v8_1, v8_2)]\n\n_m_R_buu = Map()\ndef _maint__m_R_buu_add(_e):\n (v5_1, v5_2, v5_3) = _e\n if (v5_1 not in _m_R_buu):\n _m_R_buu[v5_1] = set()\n _m_R_buu[v5_1].add((v5_2, v5_3))\n\ndef _maint_Comp1__U_Comp1_add(_e):\n # Iterate {(v1_a, v1_b, v1_c) : v1_a in deltamatch(_U_Comp1, 'b', _e, 1), (v1_a, v1_b, v1_c) in R}\n v1_a = _e\n for (v1_b, v1_c) in (_m_R_buu[v1_a] if (v1_a in _m_R_buu) else set()):\n # Begin maint _m_Comp1_bbu after \"Comp1.add((v1_a, v1_b, v1_c))\"\n _maint__m_Comp1_bbu_add((v1_a, v1_b, v1_c))\n # End maint _m_Comp1_bbu after \"Comp1.add((v1_a, v1_b, v1_c))\"\n\ndef _maint_Comp1__U_Comp1_remove(_e):\n # Iterate {(v2_a, v2_b, v2_c) : v2_a in deltamatch(_U_Comp1, 'b', _e, 1), (v2_a, v2_b, v2_c) in R}\n v2_a = _e\n for (v2_b, v2_c) in (_m_R_buu[v2_a] if (v2_a in _m_R_buu) else set()):\n # Begin maint _m_Comp1_bbu 
before \"Comp1.remove((v2_a, v2_b, v2_c))\"\n _maint__m_Comp1_bbu_remove((v2_a, v2_b, v2_c))\n # End maint _m_Comp1_bbu before \"Comp1.remove((v2_a, v2_b, v2_c))\"\n\ndef _maint_Comp1_R_add(_e):\n # Iterate {(v3_a, v3_b, v3_c) : v3_a in _U_Comp1, (v3_a, v3_b, v3_c) in deltamatch(R, 'bbb', _e, 1)}\n (v3_a, v3_b, v3_c) = _e\n if (v3_a in _U_Comp1):\n # Begin maint _m_Comp1_bbu after \"Comp1.add((v3_a, v3_b, v3_c))\"\n _maint__m_Comp1_bbu_add((v3_a, v3_b, v3_c))\n # End maint _m_Comp1_bbu after \"Comp1.add((v3_a, v3_b, v3_c))\"\n\n_U_Comp1 = RCSet()\n_UEXT_Comp1 = Set()\ndef demand_Comp1(a):\n '{(a, b, c) : a in _U_Comp1, (a, b, c) in R}'\n if (a not in _U_Comp1):\n _U_Comp1.add(a)\n # Begin maint Comp1 after \"_U_Comp1.add(a)\"\n _maint_Comp1__U_Comp1_add(a)\n # End maint Comp1 after \"_U_Comp1.add(a)\"\n else:\n _U_Comp1.incref(a)\n\ndef undemand_Comp1(a):\n '{(a, b, c) : a in _U_Comp1, (a, b, c) in R}'\n if (_U_Comp1.getref(a) == 1):\n # Begin maint Comp1 before \"_U_Comp1.remove(a)\"\n _maint_Comp1__U_Comp1_remove(a)\n # End maint Comp1 before \"_U_Comp1.remove(a)\"\n _U_Comp1.remove(a)\n else:\n _U_Comp1.decref(a)\n\ndef query_Comp1(a):\n '{(a, b, c) : a in _U_Comp1, (a, b, c) in R}'\n if (a not in _UEXT_Comp1):\n _UEXT_Comp1.add(a)\n demand_Comp1(a)\n return True\n\nfor (v1, v2, v3) in {(1, 2, 3), (2, 2, 3), (1, 3, 4)}:\n # Begin maint _m_R_buu after \"R.add((v1, v2, v3))\"\n _maint__m_R_buu_add((v1, v2, v3))\n # End maint _m_R_buu after \"R.add((v1, v2, v3))\"\n # Begin maint Comp1 after \"R.add((v1, v2, v3))\"\n _maint_Comp1_R_add((v1, v2, v3))\n # End maint Comp1 after \"R.add((v1, v2, v3))\"\na = 1\nb = 2\nprint(sorted((query_Comp1(a) and (_m_Comp1_bbu[(a, b)] if ((a, b) in _m_Comp1_bbu) else set()))))" }, { "alpha_fraction": 0.7333333492279053, "alphanum_fraction": 0.7333333492279053, "avg_line_length": 29, "blob_id": "40d13d486733bb484184efbf235c4a579453a3a4", "content_id": "95c463eee75c8008a9c4fcf4525f2923dbac72e6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 30, "license_type": "no_license", "max_line_length": 29, "num_lines": 1, "path": "/experiments/django/__init__.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from .run_django_exp import *\n" }, { "alpha_fraction": 0.5008635520935059, "alphanum_fraction": 0.5405872464179993, "avg_line_length": 21.30769157409668, "blob_id": "a36e573ca86010a20247bee5e7f6146b790d8367", "content_id": "21ebff8b3786df60d834f0f73b71b5a6a3b29319", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 579, "license_type": "no_license", "max_line_length": 56, "num_lines": 26, "path": "/incoq/tests/programs/comp/types_out.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from incoq.runtime import *\n# Comp1 := {x.a : x in S}\nComp1 = RCSet()\ndef _maint_Comp1_S_add(_e):\n # Iterate {v2_x : v2_x in deltamatch(S, 'b', _e, 1)}\n v2_x = _e\n if (v2_x.a not in Comp1):\n Comp1.add(v2_x.a)\n else:\n Comp1.incref(v2_x.a)\n\nclass T(Set):\n def __init__(self, v):\n self.a = v\n self.b = 0\n\n\nv1 = Set()\nS = v1\nfor elem in {T(1), T(2), T(3)}:\n S.add(elem)\n # Begin maint Comp1 after \"S.add(elem)\"\n _maint_Comp1_S_add(elem)\n # End maint Comp1 after \"S.add(elem)\"\nprint(sorted(Comp1))\nprint(sorted({x.b for x in S}))" }, { "alpha_fraction": 0.3621566593647003, "alphanum_fraction": 0.3801288604736328, "avg_line_length": 25.809091567993164, "blob_id": "43f5c9c5349857be047bc48c090ddecdccd3c408", 
"content_id": "87453c68e00c53c75871d1dbf93cac0d6d32e357", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2949, "license_type": "no_license", "max_line_length": 79, "num_lines": 110, "path": "/incoq/tests/util/test_str.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "###############################################################################\n# test_str.py #\n# Author: Jon Brandvein #\n###############################################################################\n\n\"\"\"Unit tests for the str module.\"\"\"\n\n\nimport unittest\n\nfrom incoq.util.str import *\n\n\nclass TestStrings(unittest.TestCase):\n \n def test_brace_items(self):\n text1 = brace_items(['a', 'b'], '<', '>')\n text2 = '<a>, <b>'\n self.assertEqual(text1, text2)\n \n def test_quote_items(self):\n text1 = quote_items(['a', 'b'])\n text2 = '\"a\", \"b\"'\n self.assertEqual(text1, text2)\n \n def test_tuple_str(self):\n text1 = tuple_str(['a', 'b'])\n text2 = '(a, b)'\n self.assertEqual(text1, text2)\n \n text3 = tuple_str(['a'])\n text4 = 'a'\n self.assertEqual(text3, text4)\n \n def test_from_tuple_str(self):\n names1 = from_tuple_str('a')\n names2 = ['a']\n self.assertEqual(names1, names2)\n \n names3 = from_tuple_str('(a,)')\n names4 = ['a']\n self.assertEqual(names3, names4)\n \n names5 = from_tuple_str('(a, b, c)')\n names6 = ['a', 'b', 'c']\n self.assertEqual(names5, names6)\n \n def test_dedent_trim(self):\n text1 = dedent_trim(\n \"\"\"\n for x in foo:\n print(x)\n \"\"\")\n exp_text1 = \"\"\"for x in foo:\\n print(x)\"\"\"\n \n self.assertEqual(text1, exp_text1)\n \n text2 = dedent_trim('')\n exp_text2 = ''\n \n self.assertEqual(text2, exp_text2)\n \n def test_join_lines(self):\n text1 = join_lines(['abc', 'def'], prefix='# ')\n text2 = dedent_trim(\n \"\"\"\n # abc\n # def\n \n \"\"\")\n self.assertEqual(text1, text2)\n \n def test_indent_lines(self):\n lines1 = indent_lines(['abc', 'def'], prefix='z')\n lines2 = ['zabc', 'zdef']\n self.assertEqual(lines1, lines2)\n \n lines1 = indent_lines(['abc', 'def'], prefix=2)\n lines2 = [' abc', ' def']\n self.assertEqual(lines1, lines2)\n \n def test_side_by_side(self):\n text1 = dedent_trim(\n \"\"\"\n a b c\n d e\n \n \"\"\")\n text2 = dedent_trim(\n \"\"\"\n \n \n u v\n w x y z\n \"\"\")\n exp_output = dedent_trim(\n \"\"\"\n .-------.---------.\n | a b c | | <<<\n | d e | |\n | | u v |\n | | w x y z |\n ^-------^---------^\n \"\"\")\n \n self.assertEqual(side_by_side(text1, text2, cmp=True), exp_output)\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.6079807877540588, "alphanum_fraction": 0.6081655025482178, "avg_line_length": 34.729373931884766, "blob_id": "b238b4773a5de29229624ef5361d43ba85a5cd9b", "content_id": "03069712748719126c1ced4b5653ea106e6eeff9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10826, "license_type": "no_license", "max_line_length": 78, "num_lines": 303, "path": "/incoq/compiler/central/options.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Transformation options management.\"\"\"\n\n\n__all__ = [\n 'OptionsManager',\n]\n\n\nfrom copy import deepcopy as dc\n\nfrom incoq.util.str import quote_items\nfrom incoq.util.collections import frozendict\nfrom incoq.compiler.incast import ProgramError\n\n\nclass DefaultNormalOptions:\n verbose = False\n \"\"\"If True, output extra information to stdout.\"\"\"\n \n eol = 'native'\n \"\"\"End-of-line markers 
to use for generated Python file.\n Can be 'lf', 'crlf', or 'native'.\n \"\"\"\n \n mode = 'normal'\n \"\"\"Action to take.\n 'normal': Transform/incrementalize\n 'outline': Only emit outline of maintenance code\n \"\"\"\n \n obj_domain = False\n \"\"\"If True, the input program is expressed in the object domain.\"\"\"\n obj_domain_out = True\n \"\"\"If False, the output program is left in the pair domain\n instead of being converted back to the object domain.\n \"\"\"\n \n input_rels = []\n \"\"\"List of names of sets in the input program that are relations\n and therefore do not need to be flattened. Relations must not be\n aliased or nested inside other values.\n \"\"\"\n \n autodetect_input_rels = False\n \"\"\"If True, automatically detect sets in the input program that\n can be considered as relations.\n \"\"\"\n \n pattern_in = False\n \"\"\"If True, the input program is expressed with pattern matching.\"\"\"\n pattern_out = False\n \"\"\"If True, the output program uses pattern matching. Note that\n the output program will not be runnable as Python code.\n \"\"\"\n \n flatten_rels = []\n \"\"\"List of names of relations to try to flatten. Applicable to\n relational domain programs.\n \"\"\"\n \n flatten_distalgo_messages = True\n \"\"\"Automatically add sets whose names fit the form of DistAlgo's\n received/sent message sets to flatten_rels. Also treat these sets\n as relations (input_rels).\n \"\"\"\n \n default_impl = 'batch'\n \"\"\"Default implementation for queries. One of 'batch', 'auxonly',\n 'inc', or 'dem'. \n \"\"\"\n \n default_uset_lru = None\n \"\"\"Default to use for query uset_lru.\"\"\"\n \n default_uset_force = False\n \"\"\"Default uset_force for queries using inc or dem.\"\"\"\n \n default_aggr_halfdemand = False\n \"\"\"Default aggr_halfdemand for queries.\"\"\"\n \n aggr_batch_fallback = True\n \"\"\"If True, allow aggregate queries to fallback on batch\n implementation when they don't fit the form we incrementalize.\n \"\"\"\n \n aggr_dem_fallback = True\n \"\"\"If True, allow aggregate queries specified to use inc to be\n upgraded to using dem if it is required due to 1) appearing in an\n incrementalized comprehension or 2) having a demand-driven operand.\n \"\"\"\n \n comp_dem_fallback = True\n \"\"\"If True, allow comprehension queries specified to use inc to be\n upgraded to using dem if it is required for handling a subquery.\n \"\"\" \n \n selfjoin_strat = 'das'\n \"\"\"Selects what implementation strategy to use to handle self-joins.\n Allowed values are 'das', 'sub', 'aug', 'assume_disjoint', and\n 'assume_disjoint_verify'.\n \"\"\"\n \n maint_emit_typechecks = True\n \"\"\"If False, omit the type checks around object-domain maintenance\n clauses.\n \"\"\"\n tag_checks = True\n \"\"\"If True, guard maintenance comprehensions with a demand check.\"\"\"\n single_tag = False\n \"\"\"If True, each query variable only gets one tag.\"\"\"\n subdem_tags = True\n \"\"\"If True, subquery demand invariants are defined based on tags\n for its demand parameters in the outer query. 
Otherwise, define\n subquery demand invariants based on the join of preceding clauses\n in the outer query.\n \"\"\"\n rc_elim = True\n \"\"\"If True, eliminate reference counts where possible.\"\"\"\n deadcode_elim = True\n \"\"\"If False, do not run deadcode elimination.\"\"\"\n deadcode_keepvars = []\n \"\"\"List of variable names to not eliminate as deadcode.\"\"\"\n \n clause_priorities = frozendict()\n \"\"\"Dictionary mapping from relation_mask identifiers to a numerical\n join heuristic ranking. See incoq/compiler/comp/order.py.\n \"\"\"\n \n maint_inline = False\n \"\"\"If True, maintenance code is inlined.\"\"\"\n \n analyze_costs = False\n \"\"\"If True, emit cost analysis information for each function.\"\"\"\n \n var_types = {}\n \"\"\"Mapping from variable names to strings that can be evaluated to\n produce a type term.\n \"\"\"\n \n obj_types = {}\n \"\"\"Mapping from names of object types to dictionaries that map\n from attribute name to attribute types (expressed as strings, as above).\n \"\"\"\n \n rewrite_costsastypes = True\n \"\"\"If True, rewrite cost terms using type domains. Thus, variable\n names get replaced by their types.\n \"\"\"\n \n nonstrict_sets = False\n \"\"\"If True, allow non-strict set updates in input program by\n rewriting them to be strict.\n \"\"\"\n nonstrict_fields = False\n \"\"\"As above but for object fields.\"\"\"\n nonstrict_maps = False\n \"\"\"As above but for maps.\"\"\"\n\n\nclass DefaultQueryOptions:\n params = ()\n \"\"\"Query parameters.\"\"\"\n uset_mode = 'uncon'\n \"\"\"Controls what parameters get tracked by the U-set.\n 'none': no U-set\n 'all': all parameters are tracked\n 'uncon': unconstrained parameters only\n 'explicit': consult uset_params\n \"\"\"\n uset_params = ()\n \"\"\"If uset_mode is 'explicit', parameters to track in the U-set.\"\"\"\n uset_force = None\n \"\"\"If True, use a nullary uset when incrementalizing with demand.\n If None, use default.\n \"\"\"\n uset_lru = None\n impl = None\n \"\"\"Implementation mode for a query.\n None: use global options to decide\n 'batch': batch computation\n 'auxonly': batch computation for aggregates, batch with\n incremental auxiliary maps for comprehensions\n 'inc': incremental computation\n 'dem': demand-filtered incremental computation\n \"\"\"\n aggr_halfdemand = None\n \"\"\"If True, aggregate queries using demand will use the\n \"half-demand\" strategy where possible. If None, use the global\n default value.\n \"\"\"\n notransform = False\n \"\"\"If True, do not transform, even if default_impl says to. This is\n for internal use by generated queries.\n \"\"\"\n maint_impl = 'auxonly'\n \"\"\"Implementation mode to use for maintenance comprehensions\n that help to incrementally compute this comprehension. Can be\n 'batch' or 'auxonly'.\n \"\"\"\n no_rc = False\n \"\"\"If True, omit reference counts for this comprehension, even\n if they would normally be required. Requires global option\n rc_elim be set to True.\n \"\"\"\n demand_reorder = None\n \"\"\"If not None, list of relative order of clauses for creating\n tag and filter dependencies (demand graph). This is a hack that\n doesn't treat the U-set differently from any other clause.\n \"\"\"\n \n _deltarel = None\n \"\"\"(Internal) If this join is a maintenance join, name of the\n delta relation for which it was formed. Otherwise None.\n \"\"\"\n _deltaelem = None\n \"\"\"(Internal) If this join is a maintenance join, source code\n for the delta element for which it was formed. 
Otherwise None.\n \"\"\"\n _deltalhs = None\n \"\"\"(Internal) If this join is a maintenance join, source code\n for the LHS of the delta clause for which it was formed.\n Otherwise None.\n \"\"\"\n _deltaop = None\n \"\"\"(Internal) If this join is a maintenance join, string\n indicating if the operation was 'add' or 'delete'. Otherwise\n None.\n \"\"\"\n \n _invalid = False\n \"\"\"(Internal) Set True if this query cannot be transformed\n due to not satisfying syntactic requirements.\n \"\"\"\n\ndefaultnormaloptions = {k: v for k, v in DefaultNormalOptions.__dict__.items()\n if not k.startswith('_')}\ndefaultqueryoptions = {k: v for k, v in DefaultQueryOptions.__dict__.items()\n if not k.startswith('_')}\n\n\nclass OptionsManager:\n \n \"\"\"Manages access to program options.\"\"\"\n \n normal_defaults = defaultnormaloptions\n query_defaults = defaultqueryoptions\n \n def __init__(self):\n self.nopts = {}\n \"\"\"Normal options.\"\"\"\n # Query options are stored on the individual query nodes.\n \n @classmethod\n def validate_nopts(cls, nopts):\n \"\"\"Check that all normal options keys are recognized.\"\"\"\n illegal_normal_keys = \\\n set(nopts.keys()) - set(cls.normal_defaults.keys())\n if illegal_normal_keys:\n raise ProgramError('Invalid options: ' +\n quote_items(illegal_normal_keys))\n \n @classmethod\n def validate_qopts(cls, qopts):\n \"\"\"Check that all query options keys are recognized.\"\"\"\n for d in qopts.values():\n illegal_query_keys = \\\n set(d.keys()) - set(cls.query_defaults.keys())\n if illegal_query_keys:\n raise ProgramError('Invalid query options: ' +\n quote_items(illegal_query_keys))\n \n def import_opts(self, nopts, qopts):\n \"\"\"Validate an opts structure, and set our nopts.\"\"\"\n self.validate_nopts(nopts)\n self.validate_qopts(qopts)\n self.nopts.update(nopts)\n \n def get_opt(self, key):\n \"\"\"Retrieve a normal option, using the default as a fallback.\"\"\"\n default = dc(self.normal_defaults[key])\n return self.nopts.get(key, default)\n \n def set_opt(self, key, value):\n \"\"\"Set a normal option.\"\"\"\n if key not in self.normal_defaults.keys():\n raise ProgramError('Invalid option: ' + key)\n self.nopts[key] = value\n \n def del_opt(self, key):\n \"\"\"Delete a normal option (so the default will be used).\"\"\"\n if key not in self.normal_defaults.keys():\n raise ProgramError('Invalid option: ' + key)\n self.nopts.pop(key, None)\n \n # Although query options are stored on the node, access is mediated\n # by the OptionsManager to catch invalid keys and access default\n # values. 
Write access to options requires rewriting the actual\n # query node.\n \n def get_queryopt(self, query, key):\n \"\"\"Retrieve a query option, using the default as a fallback.\"\"\"\n default = dc(self.query_defaults[key])\n return query.options.get(key, default)\n" }, { "alpha_fraction": 0.5470170378684998, "alphanum_fraction": 0.5473475456237793, "avg_line_length": 26.756879806518555, "blob_id": "d4847154ceee04faefb0c5fdd3c8d1094a6ffe16", "content_id": "de6caae0e5d4b879fa1b42f78a80067fc31e2a26", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6051, "license_type": "no_license", "max_line_length": 75, "num_lines": 218, "path": "/incoq/compiler/incast/nodes_untyped.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Node definitions for IncAST, without type information.\"\"\"\n\n\n__all__ = [\n 'native_nodes',\n 'incast_nodes',\n \n # Programmatically modified to include the keys of incast_nodes.\n]\n\n\nimport ast\nfrom iast.python.python34 import native_nodes as _native_nodes, py_nodes\n\nfrom incoq.util.collections import make_frozen\n\n# Flood the namespace with Struct nodes for PyASTs.\nglobals().update(py_nodes)\n\n\n# Names of nodes unique to IncAST.\nincast_node_names = [\n 'Comment',\n \n 'NOptions',\n 'QOptions',\n \n 'Maintenance',\n \n 'SetUpdate',\n 'MacroUpdate',\n 'RCSetRefUpdate',\n 'IsEmpty',\n 'GetRef',\n 'AssignKey',\n 'DelKey',\n 'Lookup',\n 'ImgLookup',\n 'RCImgLookup',\n 'SMLookup',\n \n 'DemQuery',\n 'NoDemQuery',\n \n 'SetMatch',\n 'DeltaMatch',\n 'Enumerator',\n 'Comp',\n 'Aggregate',\n]\n\n\n# The only new node in native format is Comment, for source\n# printing purposes.\n\nclass Comment(ast.stmt):\n _fields = ('text',) # string\n\n# Namespace for native nodes.\nnative_nodes = _native_nodes.copy()\nnative_nodes.update({\n 'Comment': Comment,\n})\n\ndel Comment\n\n\n# Definitions for nodes unique to IncAST.\n\n# Interesting detail: The typed versions of these nodes are\n# generated in nodes.py by stealing entries from the namespace\n# of their untyped equivalents. Since some of the below methods\n# use super(), we need the explicit form of super with args so\n# that they continue to work in the typed node classes, despite\n# having the wrong __class__ cell entry. __new__() also has to\n# work for the typed versions as well so it has to tolerate an\n# extra type field.\n\nclass Comment(stmt):\n _fields = ('text',) # string\n\nclass NOptions(stmt):\n _fields = ('opts',) # dictionary\n \n def __new__(cls, opts):\n opts = make_frozen(opts)\n return super(cls, cls).__new__(cls, opts)\n\nclass QOptions(stmt):\n _fields = ('query', # string\n 'opts') # dictionary\n \n def __new__(cls, query, opts):\n opts = make_frozen(opts)\n return super(cls, cls).__new__(cls, query, opts)\n\nclass Maintenance(stmt):\n _fields = ('name', # identifier\n 'desc', # string\n 'precode', # statement list\n 'update', # statement list\n 'postcode') # statement list\n\nclass SetUpdate(stmt):\n _fields = ('target', # expression\n 'op', # 'add' or 'remove'\n 'elem') # expression\n \n def is_varupdate(self):\n # Hackish. This only works for typed nodes, not untyped ones,\n # and has to import the typed version of the node module\n # to work. 
Should refactor.\n from .nodes import Name\n return isinstance(self.target, Name)\n \n def get_varupdate(self):\n assert self.is_varupdate()\n return self.target.id, self.op, self.elem\n\nclass MacroUpdate(stmt):\n _fields = ('target', # expression\n 'op', # 'union', 'inter', 'diff', 'symdiff',\n # 'assign', 'clear', 'mapassign', or 'mapclear'\n 'other') # expression or None\n\nclass RCSetRefUpdate(stmt):\n _fields = ('target', # expression\n 'op', # 'incref' or 'decref'\n 'elem') # expression \n\nclass IsEmpty(expr):\n _fields = ('target',) # expression\n\nclass GetRef(expr):\n _fields = ('target', # expression\n 'elem') # expression\n\nclass AssignKey(stmt):\n _fields = ('target', # expression\n 'key', # expression\n 'value') # expression\n\nclass DelKey(stmt):\n _fields = ('target', # expression\n 'key') # expression\n\nclass Lookup(expr):\n _fields = ('target', # expression\n 'key', # expression\n 'default') # expression or None\n\nclass ImgLookup(expr):\n _fields = ('target', # expression\n 'key') # expression\n\nclass RCImgLookup(expr):\n _fields = ('target', # expression\n 'key') # expression\n\nclass SMLookup(expr):\n _fields = ('target', # expression\n 'mask', # string\n 'key', # expression\n 'default') # expression or None\n\nclass DemQuery(expr):\n _fields = ('demname', # string\n 'args', # expression list\n 'value') # expression option\n\nclass NoDemQuery(expr):\n _fields = ('value',) # expression\n\nclass SetMatch(expr):\n _fields = ('target', # expression\n 'mask', # string\n 'key') # expression\n\nclass DeltaMatch(expr):\n _fields = ('target', # expression\n 'mask', # string\n 'elem', # expression\n 'limit') # integer\n\nclass Enumerator(AST):\n _fields = ('target', # expression\n 'iter') # expression\n\nclass Comp(expr):\n _fields = ('resexp', # expression\n 'clauses', # list of Enumerator and expression nodes\n 'params', # identifier list, or None\n 'options') # dictionary, or None\n \n def __new__(cls, resexp, clauses, params, options, *args, **kargs):\n options = make_frozen(options)\n return super(cls, cls).__new__(\n cls, resexp, clauses, params, options,\n *args, **kargs)\n\nclass Aggregate(expr):\n _fields = ('value', # expression\n 'op', # operation string\n 'options') # dictionary\n \n def __new__(cls, value, op, options, *args, **kargs):\n options = make_frozen(options)\n return super(cls, cls).__new__(cls, value, op, options,\n *args, **kargs)\n\n\n# Namespace for IncAST nodes.\nnew_incast_nodes = {name: globals()[name]\n for name in incast_node_names}\nincast_nodes = py_nodes.copy()\nincast_nodes.update(new_incast_nodes)\n\n__all__.extend(incast_nodes.keys())\n" }, { "alpha_fraction": 0.38344433903694153, "alphanum_fraction": 0.38344433903694153, "avg_line_length": 25.94871711730957, "blob_id": "85608603349daf34796284d32281755be7a2ae0b", "content_id": "362eb462bda2d0ca39816ad438f4cbae40298b4e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1051, "license_type": "no_license", "max_line_length": 71, "num_lines": 39, "path": "/incoq/tests/util/test_unify.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Unit tests for unify.py.\"\"\"\n\n\nimport unittest\n\nfrom incoq.util.unify import *\n\n\nclass UnifyCase(unittest.TestCase):\n \n def test_unify(self):\n # f(g(x), y) = f(g(h(z)), y)\n eqs = [(('f', ('g', 'x'), 'y'), ('f', ('g', ('h', 'z')), 'y'))]\n subst = unify(eqs)\n exp_subst = {'x': ('h', 'z')}\n self.assertEqual(subst, exp_subst)\n \n # f(x, x) = f(a, a)\n eqs = [(('f', 
'x', 'x'), ('f', ('a',), ('a',)))]\n subst = unify(eqs)\n exp_subst = {'x': ('a',)}\n self.assertEqual(subst, exp_subst)\n \n # f(x, g(x)) = f(a, g(b))\n eqs = [(('f', 'x', ('g', 'x')), ('f', ('a',), ('g', ('b',))))]\n subst = unify(eqs)\n exp_subst = None\n self.assertEqual(subst, exp_subst)\n \n # f(x, y) = f(_, z)\n eqs = [(('f', 'x', 'y'), ('f', '_', 'z')),\n (('f', 'x', 'y'), '_')]\n subst = unify(eqs)\n exp_subst = {'y': 'z'}\n self.assertEqual(subst, exp_subst)\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.44349315762519836, "alphanum_fraction": 0.5102739930152893, "avg_line_length": 29.789474487304688, "blob_id": "8927b32f396c896f8c3365032343e1397c58d5f4", "content_id": "26f89893a740697e62419a58e1bfe33661f4c261", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 584, "license_type": "no_license", "max_line_length": 73, "num_lines": 19, "path": "/incoq/tests/programs/comp/expr_out.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from incoq.runtime import *\n# Comp1 := {(f(x), (y + 1), None) : (x, y) in S}\nComp1 = RCSet()\ndef _maint_Comp1_S_add(_e):\n # Iterate {(v1_x, v1_y) : (v1_x, v1_y) in deltamatch(S, 'bb', _e, 1)}\n (v1_x, v1_y) = _e\n if ((f(v1_x), (v1_y + 1), None) not in Comp1):\n Comp1.add((f(v1_x), (v1_y + 1), None))\n else:\n Comp1.incref((f(v1_x), (v1_y + 1), None))\n\ndef f(y):\n return True\n\nfor (v1, v2) in [(1, 2), (3, 4)]:\n # Begin maint Comp1 after \"S.add((v1, v2))\"\n _maint_Comp1_S_add((v1, v2))\n # End maint Comp1 after \"S.add((v1, v2))\"\nprint(sorted(Comp1))" }, { "alpha_fraction": 0.4550517201423645, "alphanum_fraction": 0.5205757021903992, "avg_line_length": 38.85014343261719, "blob_id": "e7edd5a6ea85a2357ada2224ec6f05346698602c", "content_id": "4a9e2657f908671b6869d9e1cab83b0f908f607e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13827, "license_type": "no_license", "max_line_length": 195, "num_lines": 347, "path": "/incoq/tests/programs/aggr/nested/compdem_out.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from incoq.runtime import *\n# Aggr1 := sum(S, None)\n# Comp1 := {(x, y) : x in _U_Comp1, (x, y) in E, _av1 in {Aggr1.smlookup('u', (), None)}, (y < _av1)}\n# Comp1_Tx1 := {x : x in _U_Comp1}\n# Comp1_dE := {(x, y) : x in Comp1_Tx1, (x, y) in E}\n# Aggr1_delta := {() : _ in _U_Comp1}\n_m_Comp1_out = Map()\ndef _maint__m_Comp1_out_add(_e):\n (v32_1, v32_2) = _e\n if (v32_1 not in _m_Comp1_out):\n _m_Comp1_out[v32_1] = set()\n _m_Comp1_out[v32_1].add(v32_2)\n\ndef _maint__m_Comp1_out_remove(_e):\n (v33_1, v33_2) = _e\n _m_Comp1_out[v33_1].remove(v33_2)\n if (len(_m_Comp1_out[v33_1]) == 0):\n del _m_Comp1_out[v33_1]\n\n_m_S_u = Map()\ndef _maint__m_S_u_add(_e):\n v30_1 = _e\n if (() not in _m_S_u):\n _m_S_u[()] = set()\n _m_S_u[()].add(v30_1)\n\n_m_Aggr1_u = Map()\ndef _maint__m_Aggr1_u_add(_e):\n v28_1 = _e\n if (() not in _m_Aggr1_u):\n _m_Aggr1_u[()] = set()\n _m_Aggr1_u[()].add(v28_1)\n\ndef _maint__m_Aggr1_u_remove(_e):\n v29_1 = _e\n _m_Aggr1_u[()].remove(v29_1)\n if (len(_m_Aggr1_u[()]) == 0):\n del _m_Aggr1_u[()]\n\n_m_Comp1_dE_out = Map()\ndef _maint__m_Comp1_dE_out_add(_e):\n (v26_1, v26_2) = _e\n if (v26_1 not in _m_Comp1_dE_out):\n _m_Comp1_dE_out[v26_1] = set()\n _m_Comp1_dE_out[v26_1].add(v26_2)\n\ndef _maint__m_Comp1_dE_out_remove(_e):\n (v27_1, v27_2) = _e\n _m_Comp1_dE_out[v27_1].remove(v27_2)\n if (len(_m_Comp1_dE_out[v27_1]) == 0):\n del 
_m_Comp1_dE_out[v27_1]\n\n_m_E_out = Map()\ndef _maint__m_E_out_add(_e):\n (v24_1, v24_2) = _e\n if (v24_1 not in _m_E_out):\n _m_E_out[v24_1] = set()\n _m_E_out[v24_1].add(v24_2)\n\n_m__U_Comp1_w = Map()\ndef _maint__m__U_Comp1_w_add(_e):\n if (() not in _m__U_Comp1_w):\n _m__U_Comp1_w[()] = RCSet()\n if (() not in _m__U_Comp1_w[()]):\n _m__U_Comp1_w[()].add(())\n else:\n _m__U_Comp1_w[()].incref(())\n\ndef _maint__m__U_Comp1_w_remove(_e):\n if (_m__U_Comp1_w[()].getref(()) == 1):\n _m__U_Comp1_w[()].remove(())\n else:\n _m__U_Comp1_w[()].decref(())\n if (len(_m__U_Comp1_w[()]) == 0):\n del _m__U_Comp1_w[()]\n\nAggr1_delta = RCSet()\ndef _maint_Aggr1_delta__U_Comp1_add(_e):\n # Iterate {() : _ in deltamatch(_U_Comp1, 'w', _e, 1)}\n for _ in setmatch(({_e} if ((_m__U_Comp1_w[()] if (() in _m__U_Comp1_w) else RCSet()).getref(()) == 1) else {}), 'w', ()):\n Aggr1_delta.add(())\n\nComp1_dE = RCSet()\ndef _maint_Comp1_dE_Comp1_Tx1_add(_e):\n # Iterate {(v13_x, v13_y) : v13_x in deltamatch(Comp1_Tx1, 'b', _e, 1), (v13_x, v13_y) in E}\n v13_x = _e\n for v13_y in (_m_E_out[v13_x] if (v13_x in _m_E_out) else set()):\n Comp1_dE.add((v13_x, v13_y))\n # Begin maint _m_Comp1_dE_out after \"Comp1_dE.add((v13_x, v13_y))\"\n _maint__m_Comp1_dE_out_add((v13_x, v13_y))\n # End maint _m_Comp1_dE_out after \"Comp1_dE.add((v13_x, v13_y))\"\n\ndef _maint_Comp1_dE_Comp1_Tx1_remove(_e):\n # Iterate {(v14_x, v14_y) : v14_x in deltamatch(Comp1_Tx1, 'b', _e, 1), (v14_x, v14_y) in E}\n v14_x = _e\n for v14_y in (_m_E_out[v14_x] if (v14_x in _m_E_out) else set()):\n # Begin maint _m_Comp1_dE_out before \"Comp1_dE.remove((v14_x, v14_y))\"\n _maint__m_Comp1_dE_out_remove((v14_x, v14_y))\n # End maint _m_Comp1_dE_out before \"Comp1_dE.remove((v14_x, v14_y))\"\n Comp1_dE.remove((v14_x, v14_y))\n\ndef _maint_Comp1_dE_E_add(_e):\n # Iterate {(v15_x, v15_y) : v15_x in Comp1_Tx1, (v15_x, v15_y) in deltamatch(E, 'bb', _e, 1)}\n (v15_x, v15_y) = _e\n if (v15_x in Comp1_Tx1):\n Comp1_dE.add((v15_x, v15_y))\n # Begin maint _m_Comp1_dE_out after \"Comp1_dE.add((v15_x, v15_y))\"\n _maint__m_Comp1_dE_out_add((v15_x, v15_y))\n # End maint _m_Comp1_dE_out after \"Comp1_dE.add((v15_x, v15_y))\"\n\nComp1_Tx1 = RCSet()\ndef _maint_Comp1_Tx1__U_Comp1_add(_e):\n # Iterate {v11_x : v11_x in deltamatch(_U_Comp1, 'b', _e, 1)}\n v11_x = _e\n Comp1_Tx1.add(v11_x)\n # Begin maint Comp1_dE after \"Comp1_Tx1.add(v11_x)\"\n _maint_Comp1_dE_Comp1_Tx1_add(v11_x)\n # End maint Comp1_dE after \"Comp1_Tx1.add(v11_x)\"\n\ndef _maint_Comp1_Tx1__U_Comp1_remove(_e):\n # Iterate {v12_x : v12_x in deltamatch(_U_Comp1, 'b', _e, 1)}\n v12_x = _e\n # Begin maint Comp1_dE before \"Comp1_Tx1.remove(v12_x)\"\n _maint_Comp1_dE_Comp1_Tx1_remove(v12_x)\n # End maint Comp1_dE before \"Comp1_Tx1.remove(v12_x)\"\n Comp1_Tx1.remove(v12_x)\n\nComp1 = RCSet()\ndef _maint_Comp1__U_Comp1_add(_e):\n # Iterate {(v5_x, v5_y, v5__av1) : v5_x in deltamatch(_U_Comp1, 'b', _e, 1), (v5_x, v5_y) in Comp1_dE, v5__av1 in {Aggr1.smlookup('u', (), None)}, (v5_y < v5__av1)}\n v5_x = _e\n for v5__av1 in Aggr1:\n for v5_y in (_m_Comp1_dE_out[v5_x] if (v5_x in _m_Comp1_dE_out) else set()):\n if (v5_y < v5__av1):\n if ((v5_x, v5_y) not in Comp1):\n Comp1.add((v5_x, v5_y))\n # Begin maint _m_Comp1_out after \"Comp1.add((v5_x, v5_y))\"\n _maint__m_Comp1_out_add((v5_x, v5_y))\n # End maint _m_Comp1_out after \"Comp1.add((v5_x, v5_y))\"\n else:\n Comp1.incref((v5_x, v5_y))\n\ndef _maint_Comp1__U_Comp1_remove(_e):\n # Iterate {(v6_x, v6_y, v6__av1) : v6_x in deltamatch(_U_Comp1, 'b', _e, 1), 
(v6_x, v6_y) in Comp1_dE, v6__av1 in {Aggr1.smlookup('u', (), None)}, (v6_y < v6__av1)}\n v6_x = _e\n for v6__av1 in Aggr1:\n for v6_y in (_m_Comp1_dE_out[v6_x] if (v6_x in _m_Comp1_dE_out) else set()):\n if (v6_y < v6__av1):\n if (Comp1.getref((v6_x, v6_y)) == 1):\n # Begin maint _m_Comp1_out before \"Comp1.remove((v6_x, v6_y))\"\n _maint__m_Comp1_out_remove((v6_x, v6_y))\n # End maint _m_Comp1_out before \"Comp1.remove((v6_x, v6_y))\"\n Comp1.remove((v6_x, v6_y))\n else:\n Comp1.decref((v6_x, v6_y))\n\ndef _maint_Comp1_E_add(_e):\n # Iterate {(v7_x, v7_y, v7__av1) : v7_x in _U_Comp1, (v7_x, v7_y) in deltamatch(Comp1_dE, 'bb', _e, 1), (v7_x, v7_y) in Comp1_dE, v7__av1 in {Aggr1.smlookup('u', (), None)}, (v7_y < v7__av1)}\n (v7_x, v7_y) = _e\n if (v7_x in _U_Comp1):\n if ((v7_x, v7_y) in Comp1_dE):\n for v7__av1 in Aggr1:\n if (v7_y < v7__av1):\n if ((v7_x, v7_y) not in Comp1):\n Comp1.add((v7_x, v7_y))\n # Begin maint _m_Comp1_out after \"Comp1.add((v7_x, v7_y))\"\n _maint__m_Comp1_out_add((v7_x, v7_y))\n # End maint _m_Comp1_out after \"Comp1.add((v7_x, v7_y))\"\n else:\n Comp1.incref((v7_x, v7_y))\n\ndef _maint_Comp1_Aggr1_add(_e):\n # Iterate {(v9_x, v9_y, v9__av1) : v9_x in _U_Comp1, (v9_x, v9_y) in Comp1_dE, v9__av1 in deltamatch(Aggr1, 'b', _e, 1), (v9_y < v9__av1)}\n v9__av1 = _e\n for v9_x in _U_Comp1:\n for v9_y in (_m_Comp1_dE_out[v9_x] if (v9_x in _m_Comp1_dE_out) else set()):\n if (v9_y < v9__av1):\n if ((v9_x, v9_y) not in Comp1):\n Comp1.add((v9_x, v9_y))\n # Begin maint _m_Comp1_out after \"Comp1.add((v9_x, v9_y))\"\n _maint__m_Comp1_out_add((v9_x, v9_y))\n # End maint _m_Comp1_out after \"Comp1.add((v9_x, v9_y))\"\n else:\n Comp1.incref((v9_x, v9_y))\n\ndef _maint_Comp1_Aggr1_remove(_e):\n # Iterate {(v10_x, v10_y, v10__av1) : v10_x in _U_Comp1, (v10_x, v10_y) in Comp1_dE, v10__av1 in deltamatch(Aggr1, 'b', _e, 1), (v10_y < v10__av1)}\n v10__av1 = _e\n for v10_x in _U_Comp1:\n for v10_y in (_m_Comp1_dE_out[v10_x] if (v10_x in _m_Comp1_dE_out) else set()):\n if (v10_y < v10__av1):\n if (Comp1.getref((v10_x, v10_y)) == 1):\n # Begin maint _m_Comp1_out before \"Comp1.remove((v10_x, v10_y))\"\n _maint__m_Comp1_out_remove((v10_x, v10_y))\n # End maint _m_Comp1_out before \"Comp1.remove((v10_x, v10_y))\"\n Comp1.remove((v10_x, v10_y))\n else:\n Comp1.decref((v10_x, v10_y))\n\n_U_Comp1 = RCSet()\n_UEXT_Comp1 = Set()\ndef demand_Comp1(x):\n \"{(x, y) : x in _U_Comp1, (x, y) in E, _av1 in {Aggr1.smlookup('u', (), None)}, (y < _av1)}\"\n if (x not in _U_Comp1):\n _U_Comp1.add(x)\n # Begin maint _m__U_Comp1_w after \"_U_Comp1.add(x)\"\n _maint__m__U_Comp1_w_add(x)\n # End maint _m__U_Comp1_w after \"_U_Comp1.add(x)\"\n # Begin maint Aggr1_delta after \"_U_Comp1.add(x)\"\n _maint_Aggr1_delta__U_Comp1_add(x)\n # End maint Aggr1_delta after \"_U_Comp1.add(x)\"\n # Begin maint Comp1_Tx1 after \"_U_Comp1.add(x)\"\n _maint_Comp1_Tx1__U_Comp1_add(x)\n # End maint Comp1_Tx1 after \"_U_Comp1.add(x)\"\n # Begin maint Comp1 after \"_U_Comp1.add(x)\"\n _maint_Comp1__U_Comp1_add(x)\n # End maint Comp1 after \"_U_Comp1.add(x)\"\n # Begin maint demand_Aggr1 after \"_U_Comp1.add(x)\"\n for _ in Aggr1_delta.elements():\n demand_Aggr1()\n Aggr1_delta.clear()\n # End maint demand_Aggr1 after \"_U_Comp1.add(x)\"\n else:\n _U_Comp1.incref(x)\n\ndef undemand_Comp1(x):\n \"{(x, y) : x in _U_Comp1, (x, y) in E, _av1 in {Aggr1.smlookup('u', (), None)}, (y < _av1)}\"\n if (_U_Comp1.getref(x) == 1):\n # Begin maint Comp1 before \"_U_Comp1.remove(x)\"\n _maint_Comp1__U_Comp1_remove(x)\n # End maint Comp1 before 
\"_U_Comp1.remove(x)\"\n # Begin maint Comp1_Tx1 before \"_U_Comp1.remove(x)\"\n _maint_Comp1_Tx1__U_Comp1_remove(x)\n # End maint Comp1_Tx1 before \"_U_Comp1.remove(x)\"\n # Begin maint Aggr1_delta before \"_U_Comp1.remove(x)\"\n _maint_Aggr1_delta__U_Comp1_add(x)\n # End maint Aggr1_delta before \"_U_Comp1.remove(x)\"\n # Begin maint _m__U_Comp1_w before \"_U_Comp1.remove(x)\"\n _maint__m__U_Comp1_w_remove(x)\n # End maint _m__U_Comp1_w before \"_U_Comp1.remove(x)\"\n _U_Comp1.remove(x)\n # Begin maint undemand_Aggr1 after \"_U_Comp1.remove(x)\"\n for _ in Aggr1_delta.elements():\n undemand_Aggr1()\n Aggr1_delta.clear()\n # End maint undemand_Aggr1 after \"_U_Comp1.remove(x)\"\n else:\n _U_Comp1.decref(x)\n\ndef query_Comp1(x):\n \"{(x, y) : x in _U_Comp1, (x, y) in E, _av1 in {Aggr1.smlookup('u', (), None)}, (y < _av1)}\"\n if (x not in _UEXT_Comp1):\n _UEXT_Comp1.add(x)\n demand_Comp1(x)\n return True\n\nAggr1 = Set()\ndef _maint_Aggr1_add(_e):\n v1_v1 = _e\n if (() in _U_Aggr1):\n v1_val = _m_Aggr1_u.singlelookup(())\n v1_val = (v1_val + v1_v1)\n v1_elem = _m_Aggr1_u.singlelookup(())\n # Begin maint Comp1 before \"Aggr1.remove(v1_elem)\"\n _maint_Comp1_Aggr1_remove(v1_elem)\n # End maint Comp1 before \"Aggr1.remove(v1_elem)\"\n # Begin maint _m_Aggr1_u before \"Aggr1.remove(v1_elem)\"\n _maint__m_Aggr1_u_remove(v1_elem)\n # End maint _m_Aggr1_u before \"Aggr1.remove(v1_elem)\"\n Aggr1.remove(v1_elem)\n Aggr1.add(v1_val)\n # Begin maint _m_Aggr1_u after \"Aggr1.add(v1_val)\"\n _maint__m_Aggr1_u_add(v1_val)\n # End maint _m_Aggr1_u after \"Aggr1.add(v1_val)\"\n # Begin maint Comp1 after \"Aggr1.add(v1_val)\"\n _maint_Comp1_Aggr1_add(v1_val)\n # End maint Comp1 after \"Aggr1.add(v1_val)\"\n\n_U_Aggr1 = RCSet()\n_UEXT_Aggr1 = Set()\ndef demand_Aggr1():\n 'sum(S, None)'\n if (() not in _U_Aggr1):\n _U_Aggr1.add(())\n # Begin maint Aggr1 after \"_U_Aggr1.add(())\"\n v3_val = 0\n for v3_elem in (_m_S_u[()] if (() in _m_S_u) else set()):\n v3_val = (v3_val + v3_elem)\n Aggr1.add(v3_val)\n # Begin maint _m_Aggr1_u after \"Aggr1.add(v3_val)\"\n _maint__m_Aggr1_u_add(v3_val)\n # End maint _m_Aggr1_u after \"Aggr1.add(v3_val)\"\n # Begin maint Comp1 after \"Aggr1.add(v3_val)\"\n _maint_Comp1_Aggr1_add(v3_val)\n # End maint Comp1 after \"Aggr1.add(v3_val)\"\n # End maint Aggr1 after \"_U_Aggr1.add(())\"\n else:\n _U_Aggr1.incref(())\n\ndef undemand_Aggr1():\n 'sum(S, None)'\n if (_U_Aggr1.getref(()) == 1):\n # Begin maint Aggr1 before \"_U_Aggr1.remove(())\"\n v4_elem = _m_Aggr1_u.singlelookup(())\n # Begin maint Comp1 before \"Aggr1.remove(v4_elem)\"\n _maint_Comp1_Aggr1_remove(v4_elem)\n # End maint Comp1 before \"Aggr1.remove(v4_elem)\"\n # Begin maint _m_Aggr1_u before \"Aggr1.remove(v4_elem)\"\n _maint__m_Aggr1_u_remove(v4_elem)\n # End maint _m_Aggr1_u before \"Aggr1.remove(v4_elem)\"\n Aggr1.remove(v4_elem)\n # End maint Aggr1 before \"_U_Aggr1.remove(())\"\n _U_Aggr1.remove(())\n else:\n _U_Aggr1.decref(())\n\ndef query_Aggr1():\n 'sum(S, None)'\n if (() not in _UEXT_Aggr1):\n _UEXT_Aggr1.add(())\n demand_Aggr1()\n return True\n\nfor e in [1, 2, 3, 4]:\n # Begin maint _m_S_u after \"S.add(e)\"\n _maint__m_S_u_add(e)\n # End maint _m_S_u after \"S.add(e)\"\n # Begin maint Aggr1 after \"S.add(e)\"\n _maint_Aggr1_add(e)\n # End maint Aggr1 after \"S.add(e)\"\nfor e in [(1, 5), (1, 8), (1, 15), (2, 9), (2, 18)]:\n # Begin maint _m_E_out after \"E.add(e)\"\n _maint__m_E_out_add(e)\n # End maint _m_E_out after \"E.add(e)\"\n # Begin maint Comp1_dE after \"E.add(e)\"\n _maint_Comp1_dE_E_add(e)\n 
# End maint Comp1_dE after \"E.add(e)\"\n # Begin maint Comp1 after \"E.add(e)\"\n _maint_Comp1_E_add(e)\n # End maint Comp1 after \"E.add(e)\"\n # Begin maint demand_Aggr1 after \"E.add(e)\"\n for _ in Aggr1_delta.elements():\n demand_Aggr1()\n Aggr1_delta.clear()\n # End maint demand_Aggr1 after \"E.add(e)\"\nx = 1\nprint(sorted((query_Comp1(x) and (_m_Comp1_out[x] if (x in _m_Comp1_out) else set()))))" }, { "alpha_fraction": 0.44458433985710144, "alphanum_fraction": 0.5329060554504395, "avg_line_length": 41.00640869140625, "blob_id": "6b43c3021a08d1f5a5aa3885ac6302ab4e644f50", "content_id": "6ae913503ea12ad38ff0dc61b796d8457c27ba4a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 26211, "license_type": "no_license", "max_line_length": 186, "num_lines": 624, "path": "/incoq/tests/programs/aggr/obj_out.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from incoq.runtime import *\n# Comp1 := {(s, _e) : s in _U_Comp1, (s, _e) in _M}\n# Comp1_Ts := {s : s in _U_Comp1}\n# Comp1_d_M := {(s, _e) : s in Comp1_Ts, (s, _e) in _M}\n# Aggr1 := sum(DEMQUERY(Comp1, [s], setmatch(Comp1, 'bu', s)), None)\n# Comp12 := {(o, _e) : o in _U_Comp12, (o, o_f) in _F_f, (o_f, _e) in _M}\n# Comp12_To := {o : o in _U_Comp12}\n# Comp12_d_F_f := {(o, o_f) : o in Comp12_To, (o, o_f) in _F_f}\n# Comp12_To_f := {o_f : (o, o_f) in Comp12_d_F_f}\n# Comp12_d_M := {(o_f, _e) : o_f in Comp12_To_f, (o_f, _e) in _M}\n# Aggr2 := sum(DEMQUERY(Comp12, [o], setmatch(Comp12, 'bu', o)), None)\n_m_Comp1_out = Map()\ndef _maint__m_Comp1_out_add(_e):\n (v45_1, v45_2) = _e\n if (v45_1 not in _m_Comp1_out):\n _m_Comp1_out[v45_1] = set()\n _m_Comp1_out[v45_1].add(v45_2)\n\ndef _maint__m_Comp1_out_remove(_e):\n (v46_1, v46_2) = _e\n _m_Comp1_out[v46_1].remove(v46_2)\n if (len(_m_Comp1_out[v46_1]) == 0):\n del _m_Comp1_out[v46_1]\n\n_m_Aggr1_out = Map()\ndef _maint__m_Aggr1_out_add(_e):\n (v43_1, v43_2) = _e\n if (v43_1 not in _m_Aggr1_out):\n _m_Aggr1_out[v43_1] = set()\n _m_Aggr1_out[v43_1].add(v43_2)\n\ndef _maint__m_Aggr1_out_remove(_e):\n (v44_1, v44_2) = _e\n _m_Aggr1_out[v44_1].remove(v44_2)\n if (len(_m_Aggr1_out[v44_1]) == 0):\n del _m_Aggr1_out[v44_1]\n\n_m_Comp12_d_F_f_in = Map()\ndef _maint__m_Comp12_d_F_f_in_add(_e):\n (v41_1, v41_2) = _e\n if (v41_2 not in _m_Comp12_d_F_f_in):\n _m_Comp12_d_F_f_in[v41_2] = set()\n _m_Comp12_d_F_f_in[v41_2].add(v41_1)\n\ndef _maint__m_Comp12_d_F_f_in_remove(_e):\n (v42_1, v42_2) = _e\n _m_Comp12_d_F_f_in[v42_2].remove(v42_1)\n if (len(_m_Comp12_d_F_f_in[v42_2]) == 0):\n del _m_Comp12_d_F_f_in[v42_2]\n\n_m_Comp12_out = Map()\ndef _maint__m_Comp12_out_add(_e):\n (v39_1, v39_2) = _e\n if (v39_1 not in _m_Comp12_out):\n _m_Comp12_out[v39_1] = set()\n _m_Comp12_out[v39_1].add(v39_2)\n\ndef _maint__m_Comp12_out_remove(_e):\n (v40_1, v40_2) = _e\n _m_Comp12_out[v40_1].remove(v40_2)\n if (len(_m_Comp12_out[v40_1]) == 0):\n del _m_Comp12_out[v40_1]\n\n_m_Aggr2_out = Map()\ndef _maint__m_Aggr2_out_add(_e):\n (v37_1, v37_2) = _e\n if (v37_1 not in _m_Aggr2_out):\n _m_Aggr2_out[v37_1] = set()\n _m_Aggr2_out[v37_1].add(v37_2)\n\ndef _maint__m_Aggr2_out_remove(_e):\n (v38_1, v38_2) = _e\n _m_Aggr2_out[v38_1].remove(v38_2)\n if (len(_m_Aggr2_out[v38_1]) == 0):\n del _m_Aggr2_out[v38_1]\n\ndef _maint_Aggr2_add(_e):\n (v33_v1, v33_v2) = _e\n if (v33_v1 in _U_Aggr2):\n v33_val = _m_Aggr2_out.singlelookup(v33_v1)\n v33_val = (v33_val + v33_v2)\n v33_1 = v33_v1\n v33_elem = _m_Aggr2_out.singlelookup(v33_v1)\n # Begin maint _m_Aggr2_out 
before \"Aggr2.remove((v33_1, v33_elem))\"\n _maint__m_Aggr2_out_remove((v33_1, v33_elem))\n # End maint _m_Aggr2_out before \"Aggr2.remove((v33_1, v33_elem))\"\n # Begin maint _m_Aggr2_out after \"Aggr2.add((v33_1, v33_val))\"\n _maint__m_Aggr2_out_add((v33_1, v33_val))\n # End maint _m_Aggr2_out after \"Aggr2.add((v33_1, v33_val))\"\n\ndef _maint_Aggr2_remove(_e):\n (v34_v1, v34_v2) = _e\n if (v34_v1 in _U_Aggr2):\n v34_val = _m_Aggr2_out.singlelookup(v34_v1)\n v34_val = (v34_val - v34_v2)\n v34_1 = v34_v1\n v34_elem = _m_Aggr2_out.singlelookup(v34_v1)\n # Begin maint _m_Aggr2_out before \"Aggr2.remove((v34_1, v34_elem))\"\n _maint__m_Aggr2_out_remove((v34_1, v34_elem))\n # End maint _m_Aggr2_out before \"Aggr2.remove((v34_1, v34_elem))\"\n # Begin maint _m_Aggr2_out after \"Aggr2.add((v34_1, v34_val))\"\n _maint__m_Aggr2_out_add((v34_1, v34_val))\n # End maint _m_Aggr2_out after \"Aggr2.add((v34_1, v34_val))\"\n\n_U_Aggr2 = RCSet()\n_UEXT_Aggr2 = Set()\ndef demand_Aggr2(o):\n \"sum(DEMQUERY(Comp12, [o], setmatch(Comp12, 'bu', o)), None)\"\n if (o not in _U_Aggr2):\n _U_Aggr2.add(o)\n # Begin maint Aggr2 after \"_U_Aggr2.add(o)\"\n v35_val = 0\n for v35_elem in (_m_Comp12_out[o] if (o in _m_Comp12_out) else set()):\n v35_val = (v35_val + v35_elem)\n v35_1 = o\n # Begin maint _m_Aggr2_out after \"Aggr2.add((v35_1, v35_val))\"\n _maint__m_Aggr2_out_add((v35_1, v35_val))\n # End maint _m_Aggr2_out after \"Aggr2.add((v35_1, v35_val))\"\n demand_Comp12(o)\n # End maint Aggr2 after \"_U_Aggr2.add(o)\"\n else:\n _U_Aggr2.incref(o)\n\ndef undemand_Aggr2(o):\n \"sum(DEMQUERY(Comp12, [o], setmatch(Comp12, 'bu', o)), None)\"\n if (_U_Aggr2.getref(o) == 1):\n # Begin maint Aggr2 before \"_U_Aggr2.remove(o)\"\n undemand_Comp12(o)\n v36_1 = o\n v36_elem = _m_Aggr2_out.singlelookup(o)\n # Begin maint _m_Aggr2_out before \"Aggr2.remove((v36_1, v36_elem))\"\n _maint__m_Aggr2_out_remove((v36_1, v36_elem))\n # End maint _m_Aggr2_out before \"Aggr2.remove((v36_1, v36_elem))\"\n # End maint Aggr2 before \"_U_Aggr2.remove(o)\"\n _U_Aggr2.remove(o)\n else:\n _U_Aggr2.decref(o)\n\ndef query_Aggr2(o):\n \"sum(DEMQUERY(Comp12, [o], setmatch(Comp12, 'bu', o)), None)\"\n if (o not in _UEXT_Aggr2):\n _UEXT_Aggr2.add(o)\n demand_Aggr2(o)\n return True\n\nComp12_d_M = RCSet()\ndef _maint_Comp12_d_M_Comp12_To_f_add(_e):\n # Iterate {(v29_o_f, v29__e) : v29_o_f in deltamatch(Comp12_To_f, 'b', _e, 1), (v29_o_f, v29__e) in _M}\n v29_o_f = _e\n if isinstance(v29_o_f, Set):\n for v29__e in v29_o_f:\n Comp12_d_M.add((v29_o_f, v29__e))\n\ndef _maint_Comp12_d_M_Comp12_To_f_remove(_e):\n # Iterate {(v30_o_f, v30__e) : v30_o_f in deltamatch(Comp12_To_f, 'b', _e, 1), (v30_o_f, v30__e) in _M}\n v30_o_f = _e\n if isinstance(v30_o_f, Set):\n for v30__e in v30_o_f:\n Comp12_d_M.remove((v30_o_f, v30__e))\n\ndef _maint_Comp12_d_M__M_add(_e):\n # Iterate {(v31_o_f, v31__e) : v31_o_f in Comp12_To_f, (v31_o_f, v31__e) in deltamatch(_M, 'bb', _e, 1)}\n (v31_o_f, v31__e) = _e\n if (v31_o_f in Comp12_To_f):\n Comp12_d_M.add((v31_o_f, v31__e))\n\ndef _maint_Comp12_d_M__M_remove(_e):\n # Iterate {(v32_o_f, v32__e) : v32_o_f in Comp12_To_f, (v32_o_f, v32__e) in deltamatch(_M, 'bb', _e, 1)}\n (v32_o_f, v32__e) = _e\n if (v32_o_f in Comp12_To_f):\n Comp12_d_M.remove((v32_o_f, v32__e))\n\nComp12_To_f = RCSet()\ndef _maint_Comp12_To_f_Comp12_d_F_f_add(_e):\n # Iterate {(v27_o, v27_o_f) : (v27_o, v27_o_f) in deltamatch(Comp12_d_F_f, 'bb', _e, 1)}\n (v27_o, v27_o_f) = _e\n if (v27_o_f not in Comp12_To_f):\n Comp12_To_f.add(v27_o_f)\n # Begin maint 
Comp12_d_M after \"Comp12_To_f.add(v27_o_f)\"\n _maint_Comp12_d_M_Comp12_To_f_add(v27_o_f)\n # End maint Comp12_d_M after \"Comp12_To_f.add(v27_o_f)\"\n else:\n Comp12_To_f.incref(v27_o_f)\n\ndef _maint_Comp12_To_f_Comp12_d_F_f_remove(_e):\n # Iterate {(v28_o, v28_o_f) : (v28_o, v28_o_f) in deltamatch(Comp12_d_F_f, 'bb', _e, 1)}\n (v28_o, v28_o_f) = _e\n if (Comp12_To_f.getref(v28_o_f) == 1):\n # Begin maint Comp12_d_M before \"Comp12_To_f.remove(v28_o_f)\"\n _maint_Comp12_d_M_Comp12_To_f_remove(v28_o_f)\n # End maint Comp12_d_M before \"Comp12_To_f.remove(v28_o_f)\"\n Comp12_To_f.remove(v28_o_f)\n else:\n Comp12_To_f.decref(v28_o_f)\n\nComp12_d_F_f = RCSet()\ndef _maint_Comp12_d_F_f_Comp12_To_add(_e):\n # Iterate {(v23_o, v23_o_f) : v23_o in deltamatch(Comp12_To, 'b', _e, 1), (v23_o, v23_o_f) in _F_f}\n v23_o = _e\n if hasattr(v23_o, 'f'):\n v23_o_f = v23_o.f\n Comp12_d_F_f.add((v23_o, v23_o_f))\n # Begin maint _m_Comp12_d_F_f_in after \"Comp12_d_F_f.add((v23_o, v23_o_f))\"\n _maint__m_Comp12_d_F_f_in_add((v23_o, v23_o_f))\n # End maint _m_Comp12_d_F_f_in after \"Comp12_d_F_f.add((v23_o, v23_o_f))\"\n # Begin maint Comp12_To_f after \"Comp12_d_F_f.add((v23_o, v23_o_f))\"\n _maint_Comp12_To_f_Comp12_d_F_f_add((v23_o, v23_o_f))\n # End maint Comp12_To_f after \"Comp12_d_F_f.add((v23_o, v23_o_f))\"\n\ndef _maint_Comp12_d_F_f_Comp12_To_remove(_e):\n # Iterate {(v24_o, v24_o_f) : v24_o in deltamatch(Comp12_To, 'b', _e, 1), (v24_o, v24_o_f) in _F_f}\n v24_o = _e\n if hasattr(v24_o, 'f'):\n v24_o_f = v24_o.f\n # Begin maint Comp12_To_f before \"Comp12_d_F_f.remove((v24_o, v24_o_f))\"\n _maint_Comp12_To_f_Comp12_d_F_f_remove((v24_o, v24_o_f))\n # End maint Comp12_To_f before \"Comp12_d_F_f.remove((v24_o, v24_o_f))\"\n # Begin maint _m_Comp12_d_F_f_in before \"Comp12_d_F_f.remove((v24_o, v24_o_f))\"\n _maint__m_Comp12_d_F_f_in_remove((v24_o, v24_o_f))\n # End maint _m_Comp12_d_F_f_in before \"Comp12_d_F_f.remove((v24_o, v24_o_f))\"\n Comp12_d_F_f.remove((v24_o, v24_o_f))\n\ndef _maint_Comp12_d_F_f__F_f_add(_e):\n # Iterate {(v25_o, v25_o_f) : v25_o in Comp12_To, (v25_o, v25_o_f) in deltamatch(_F_f, 'bb', _e, 1)}\n (v25_o, v25_o_f) = _e\n if (v25_o in Comp12_To):\n Comp12_d_F_f.add((v25_o, v25_o_f))\n # Begin maint _m_Comp12_d_F_f_in after \"Comp12_d_F_f.add((v25_o, v25_o_f))\"\n _maint__m_Comp12_d_F_f_in_add((v25_o, v25_o_f))\n # End maint _m_Comp12_d_F_f_in after \"Comp12_d_F_f.add((v25_o, v25_o_f))\"\n # Begin maint Comp12_To_f after \"Comp12_d_F_f.add((v25_o, v25_o_f))\"\n _maint_Comp12_To_f_Comp12_d_F_f_add((v25_o, v25_o_f))\n # End maint Comp12_To_f after \"Comp12_d_F_f.add((v25_o, v25_o_f))\"\n\nComp12_To = RCSet()\ndef _maint_Comp12_To__U_Comp12_add(_e):\n # Iterate {v21_o : v21_o in deltamatch(_U_Comp12, 'b', _e, 1)}\n v21_o = _e\n Comp12_To.add(v21_o)\n # Begin maint Comp12_d_F_f after \"Comp12_To.add(v21_o)\"\n _maint_Comp12_d_F_f_Comp12_To_add(v21_o)\n # End maint Comp12_d_F_f after \"Comp12_To.add(v21_o)\"\n\ndef _maint_Comp12_To__U_Comp12_remove(_e):\n # Iterate {v22_o : v22_o in deltamatch(_U_Comp12, 'b', _e, 1)}\n v22_o = _e\n # Begin maint Comp12_d_F_f before \"Comp12_To.remove(v22_o)\"\n _maint_Comp12_d_F_f_Comp12_To_remove(v22_o)\n # End maint Comp12_d_F_f before \"Comp12_To.remove(v22_o)\"\n Comp12_To.remove(v22_o)\n\nComp12 = RCSet()\ndef _maint_Comp12__U_Comp12_add(_e):\n # Iterate {(v15_o, v15_o_f, v15__e) : v15_o in deltamatch(_U_Comp12, 'b', _e, 1), (v15_o, v15_o_f) in _F_f, (v15_o_f, v15__e) in _M}\n v15_o = _e\n if hasattr(v15_o, 'f'):\n v15_o_f = v15_o.f\n if 
isinstance(v15_o_f, Set):\n for v15__e in v15_o_f:\n if ((v15_o, v15__e) not in Comp12):\n Comp12.add((v15_o, v15__e))\n # Begin maint _m_Comp12_out after \"Comp12.add((v15_o, v15__e))\"\n _maint__m_Comp12_out_add((v15_o, v15__e))\n # End maint _m_Comp12_out after \"Comp12.add((v15_o, v15__e))\"\n # Begin maint Aggr2 after \"Comp12.add((v15_o, v15__e))\"\n _maint_Aggr2_add((v15_o, v15__e))\n # End maint Aggr2 after \"Comp12.add((v15_o, v15__e))\"\n else:\n Comp12.incref((v15_o, v15__e))\n\ndef _maint_Comp12__U_Comp12_remove(_e):\n # Iterate {(v16_o, v16_o_f, v16__e) : v16_o in deltamatch(_U_Comp12, 'b', _e, 1), (v16_o, v16_o_f) in _F_f, (v16_o_f, v16__e) in _M}\n v16_o = _e\n if hasattr(v16_o, 'f'):\n v16_o_f = v16_o.f\n if isinstance(v16_o_f, Set):\n for v16__e in v16_o_f:\n if (Comp12.getref((v16_o, v16__e)) == 1):\n # Begin maint Aggr2 before \"Comp12.remove((v16_o, v16__e))\"\n _maint_Aggr2_remove((v16_o, v16__e))\n # End maint Aggr2 before \"Comp12.remove((v16_o, v16__e))\"\n # Begin maint _m_Comp12_out before \"Comp12.remove((v16_o, v16__e))\"\n _maint__m_Comp12_out_remove((v16_o, v16__e))\n # End maint _m_Comp12_out before \"Comp12.remove((v16_o, v16__e))\"\n Comp12.remove((v16_o, v16__e))\n else:\n Comp12.decref((v16_o, v16__e))\n\ndef _maint_Comp12__F_f_add(_e):\n # Iterate {(v17_o, v17_o_f, v17__e) : v17_o in _U_Comp12, (v17_o, v17_o_f) in deltamatch(Comp12_d_F_f, 'bb', _e, 1), (v17_o, v17_o_f) in Comp12_d_F_f, (v17_o_f, v17__e) in _M}\n (v17_o, v17_o_f) = _e\n if (v17_o in _U_Comp12):\n if ((v17_o, v17_o_f) in Comp12_d_F_f):\n if isinstance(v17_o_f, Set):\n for v17__e in v17_o_f:\n if ((v17_o, v17__e) not in Comp12):\n Comp12.add((v17_o, v17__e))\n # Begin maint _m_Comp12_out after \"Comp12.add((v17_o, v17__e))\"\n _maint__m_Comp12_out_add((v17_o, v17__e))\n # End maint _m_Comp12_out after \"Comp12.add((v17_o, v17__e))\"\n # Begin maint Aggr2 after \"Comp12.add((v17_o, v17__e))\"\n _maint_Aggr2_add((v17_o, v17__e))\n # End maint Aggr2 after \"Comp12.add((v17_o, v17__e))\"\n else:\n Comp12.incref((v17_o, v17__e))\n\ndef _maint_Comp12__M_add(_e):\n # Iterate {(v19_o, v19_o_f, v19__e) : v19_o in _U_Comp12, (v19_o, v19_o_f) in Comp12_d_F_f, (v19_o_f, v19__e) in deltamatch(Comp12_d_M, 'bb', _e, 1), (v19_o_f, v19__e) in Comp12_d_M}\n (v19_o_f, v19__e) = _e\n if ((v19_o_f, v19__e) in Comp12_d_M):\n for v19_o in (_m_Comp12_d_F_f_in[v19_o_f] if (v19_o_f in _m_Comp12_d_F_f_in) else set()):\n if (v19_o in _U_Comp12):\n if ((v19_o, v19__e) not in Comp12):\n Comp12.add((v19_o, v19__e))\n # Begin maint _m_Comp12_out after \"Comp12.add((v19_o, v19__e))\"\n _maint__m_Comp12_out_add((v19_o, v19__e))\n # End maint _m_Comp12_out after \"Comp12.add((v19_o, v19__e))\"\n # Begin maint Aggr2 after \"Comp12.add((v19_o, v19__e))\"\n _maint_Aggr2_add((v19_o, v19__e))\n # End maint Aggr2 after \"Comp12.add((v19_o, v19__e))\"\n else:\n Comp12.incref((v19_o, v19__e))\n\ndef _maint_Comp12__M_remove(_e):\n # Iterate {(v20_o, v20_o_f, v20__e) : v20_o in _U_Comp12, (v20_o, v20_o_f) in Comp12_d_F_f, (v20_o_f, v20__e) in deltamatch(Comp12_d_M, 'bb', _e, 1), (v20_o_f, v20__e) in Comp12_d_M}\n (v20_o_f, v20__e) = _e\n if ((v20_o_f, v20__e) in Comp12_d_M):\n for v20_o in (_m_Comp12_d_F_f_in[v20_o_f] if (v20_o_f in _m_Comp12_d_F_f_in) else set()):\n if (v20_o in _U_Comp12):\n if (Comp12.getref((v20_o, v20__e)) == 1):\n # Begin maint Aggr2 before \"Comp12.remove((v20_o, v20__e))\"\n _maint_Aggr2_remove((v20_o, v20__e))\n # End maint Aggr2 before \"Comp12.remove((v20_o, v20__e))\"\n # Begin maint _m_Comp12_out before 
\"Comp12.remove((v20_o, v20__e))\"\n _maint__m_Comp12_out_remove((v20_o, v20__e))\n # End maint _m_Comp12_out before \"Comp12.remove((v20_o, v20__e))\"\n Comp12.remove((v20_o, v20__e))\n else:\n Comp12.decref((v20_o, v20__e))\n\n_U_Comp12 = RCSet()\n_UEXT_Comp12 = Set()\ndef demand_Comp12(o):\n '{(o, _e) : o in _U_Comp12, (o, o_f) in _F_f, (o_f, _e) in _M}'\n if (o not in _U_Comp12):\n _U_Comp12.add(o)\n # Begin maint Comp12_To after \"_U_Comp12.add(o)\"\n _maint_Comp12_To__U_Comp12_add(o)\n # End maint Comp12_To after \"_U_Comp12.add(o)\"\n # Begin maint Comp12 after \"_U_Comp12.add(o)\"\n _maint_Comp12__U_Comp12_add(o)\n # End maint Comp12 after \"_U_Comp12.add(o)\"\n else:\n _U_Comp12.incref(o)\n\ndef undemand_Comp12(o):\n '{(o, _e) : o in _U_Comp12, (o, o_f) in _F_f, (o_f, _e) in _M}'\n if (_U_Comp12.getref(o) == 1):\n # Begin maint Comp12 before \"_U_Comp12.remove(o)\"\n _maint_Comp12__U_Comp12_remove(o)\n # End maint Comp12 before \"_U_Comp12.remove(o)\"\n # Begin maint Comp12_To before \"_U_Comp12.remove(o)\"\n _maint_Comp12_To__U_Comp12_remove(o)\n # End maint Comp12_To before \"_U_Comp12.remove(o)\"\n _U_Comp12.remove(o)\n else:\n _U_Comp12.decref(o)\n\ndef query_Comp12(o):\n '{(o, _e) : o in _U_Comp12, (o, o_f) in _F_f, (o_f, _e) in _M}'\n if (o not in _UEXT_Comp12):\n _UEXT_Comp12.add(o)\n demand_Comp12(o)\n return True\n\ndef _maint_Aggr1_add(_e):\n (v11_v1, v11_v2) = _e\n if (v11_v1 in _U_Aggr1):\n v11_val = _m_Aggr1_out.singlelookup(v11_v1)\n v11_val = (v11_val + v11_v2)\n v11_1 = v11_v1\n v11_elem = _m_Aggr1_out.singlelookup(v11_v1)\n # Begin maint _m_Aggr1_out before \"Aggr1.remove((v11_1, v11_elem))\"\n _maint__m_Aggr1_out_remove((v11_1, v11_elem))\n # End maint _m_Aggr1_out before \"Aggr1.remove((v11_1, v11_elem))\"\n # Begin maint _m_Aggr1_out after \"Aggr1.add((v11_1, v11_val))\"\n _maint__m_Aggr1_out_add((v11_1, v11_val))\n # End maint _m_Aggr1_out after \"Aggr1.add((v11_1, v11_val))\"\n\ndef _maint_Aggr1_remove(_e):\n (v12_v1, v12_v2) = _e\n if (v12_v1 in _U_Aggr1):\n v12_val = _m_Aggr1_out.singlelookup(v12_v1)\n v12_val = (v12_val - v12_v2)\n v12_1 = v12_v1\n v12_elem = _m_Aggr1_out.singlelookup(v12_v1)\n # Begin maint _m_Aggr1_out before \"Aggr1.remove((v12_1, v12_elem))\"\n _maint__m_Aggr1_out_remove((v12_1, v12_elem))\n # End maint _m_Aggr1_out before \"Aggr1.remove((v12_1, v12_elem))\"\n # Begin maint _m_Aggr1_out after \"Aggr1.add((v12_1, v12_val))\"\n _maint__m_Aggr1_out_add((v12_1, v12_val))\n # End maint _m_Aggr1_out after \"Aggr1.add((v12_1, v12_val))\"\n\n_U_Aggr1 = RCSet()\n_UEXT_Aggr1 = Set()\ndef demand_Aggr1(s):\n \"sum(DEMQUERY(Comp1, [s], setmatch(Comp1, 'bu', s)), None)\"\n if (s not in _U_Aggr1):\n _U_Aggr1.add(s)\n # Begin maint Aggr1 after \"_U_Aggr1.add(s)\"\n v13_val = 0\n for v13_elem in (_m_Comp1_out[s] if (s in _m_Comp1_out) else set()):\n v13_val = (v13_val + v13_elem)\n v13_1 = s\n # Begin maint _m_Aggr1_out after \"Aggr1.add((v13_1, v13_val))\"\n _maint__m_Aggr1_out_add((v13_1, v13_val))\n # End maint _m_Aggr1_out after \"Aggr1.add((v13_1, v13_val))\"\n demand_Comp1(s)\n # End maint Aggr1 after \"_U_Aggr1.add(s)\"\n else:\n _U_Aggr1.incref(s)\n\ndef undemand_Aggr1(s):\n \"sum(DEMQUERY(Comp1, [s], setmatch(Comp1, 'bu', s)), None)\"\n if (_U_Aggr1.getref(s) == 1):\n # Begin maint Aggr1 before \"_U_Aggr1.remove(s)\"\n undemand_Comp1(s)\n v14_1 = s\n v14_elem = _m_Aggr1_out.singlelookup(s)\n # Begin maint _m_Aggr1_out before \"Aggr1.remove((v14_1, v14_elem))\"\n _maint__m_Aggr1_out_remove((v14_1, v14_elem))\n # End maint _m_Aggr1_out before 
\"Aggr1.remove((v14_1, v14_elem))\"\n # End maint Aggr1 before \"_U_Aggr1.remove(s)\"\n _U_Aggr1.remove(s)\n else:\n _U_Aggr1.decref(s)\n\ndef query_Aggr1(s):\n \"sum(DEMQUERY(Comp1, [s], setmatch(Comp1, 'bu', s)), None)\"\n if (s not in _UEXT_Aggr1):\n _UEXT_Aggr1.add(s)\n demand_Aggr1(s)\n return True\n\nComp1_d_M = RCSet()\ndef _maint_Comp1_d_M_Comp1_Ts_add(_e):\n # Iterate {(v7_s, v7__e) : v7_s in deltamatch(Comp1_Ts, 'b', _e, 1), (v7_s, v7__e) in _M}\n v7_s = _e\n if isinstance(v7_s, Set):\n for v7__e in v7_s:\n Comp1_d_M.add((v7_s, v7__e))\n\ndef _maint_Comp1_d_M_Comp1_Ts_remove(_e):\n # Iterate {(v8_s, v8__e) : v8_s in deltamatch(Comp1_Ts, 'b', _e, 1), (v8_s, v8__e) in _M}\n v8_s = _e\n if isinstance(v8_s, Set):\n for v8__e in v8_s:\n Comp1_d_M.remove((v8_s, v8__e))\n\ndef _maint_Comp1_d_M__M_add(_e):\n # Iterate {(v9_s, v9__e) : v9_s in Comp1_Ts, (v9_s, v9__e) in deltamatch(_M, 'bb', _e, 1)}\n (v9_s, v9__e) = _e\n if (v9_s in Comp1_Ts):\n Comp1_d_M.add((v9_s, v9__e))\n\ndef _maint_Comp1_d_M__M_remove(_e):\n # Iterate {(v10_s, v10__e) : v10_s in Comp1_Ts, (v10_s, v10__e) in deltamatch(_M, 'bb', _e, 1)}\n (v10_s, v10__e) = _e\n if (v10_s in Comp1_Ts):\n Comp1_d_M.remove((v10_s, v10__e))\n\nComp1_Ts = RCSet()\ndef _maint_Comp1_Ts__U_Comp1_add(_e):\n # Iterate {v5_s : v5_s in deltamatch(_U_Comp1, 'b', _e, 1)}\n v5_s = _e\n Comp1_Ts.add(v5_s)\n # Begin maint Comp1_d_M after \"Comp1_Ts.add(v5_s)\"\n _maint_Comp1_d_M_Comp1_Ts_add(v5_s)\n # End maint Comp1_d_M after \"Comp1_Ts.add(v5_s)\"\n\ndef _maint_Comp1_Ts__U_Comp1_remove(_e):\n # Iterate {v6_s : v6_s in deltamatch(_U_Comp1, 'b', _e, 1)}\n v6_s = _e\n # Begin maint Comp1_d_M before \"Comp1_Ts.remove(v6_s)\"\n _maint_Comp1_d_M_Comp1_Ts_remove(v6_s)\n # End maint Comp1_d_M before \"Comp1_Ts.remove(v6_s)\"\n Comp1_Ts.remove(v6_s)\n\ndef _maint_Comp1__U_Comp1_add(_e):\n # Iterate {(v1_s, v1__e) : v1_s in deltamatch(_U_Comp1, 'b', _e, 1), (v1_s, v1__e) in _M}\n v1_s = _e\n if isinstance(v1_s, Set):\n for v1__e in v1_s:\n # Begin maint _m_Comp1_out after \"Comp1.add((v1_s, v1__e))\"\n _maint__m_Comp1_out_add((v1_s, v1__e))\n # End maint _m_Comp1_out after \"Comp1.add((v1_s, v1__e))\"\n # Begin maint Aggr1 after \"Comp1.add((v1_s, v1__e))\"\n _maint_Aggr1_add((v1_s, v1__e))\n # End maint Aggr1 after \"Comp1.add((v1_s, v1__e))\"\n\ndef _maint_Comp1__U_Comp1_remove(_e):\n # Iterate {(v2_s, v2__e) : v2_s in deltamatch(_U_Comp1, 'b', _e, 1), (v2_s, v2__e) in _M}\n v2_s = _e\n if isinstance(v2_s, Set):\n for v2__e in v2_s:\n # Begin maint Aggr1 before \"Comp1.remove((v2_s, v2__e))\"\n _maint_Aggr1_remove((v2_s, v2__e))\n # End maint Aggr1 before \"Comp1.remove((v2_s, v2__e))\"\n # Begin maint _m_Comp1_out before \"Comp1.remove((v2_s, v2__e))\"\n _maint__m_Comp1_out_remove((v2_s, v2__e))\n # End maint _m_Comp1_out before \"Comp1.remove((v2_s, v2__e))\"\n\ndef _maint_Comp1__M_add(_e):\n # Iterate {(v3_s, v3__e) : v3_s in _U_Comp1, (v3_s, v3__e) in deltamatch(Comp1_d_M, 'bb', _e, 1), (v3_s, v3__e) in Comp1_d_M}\n (v3_s, v3__e) = _e\n if (v3_s in _U_Comp1):\n if ((v3_s, v3__e) in Comp1_d_M):\n # Begin maint _m_Comp1_out after \"Comp1.add((v3_s, v3__e))\"\n _maint__m_Comp1_out_add((v3_s, v3__e))\n # End maint _m_Comp1_out after \"Comp1.add((v3_s, v3__e))\"\n # Begin maint Aggr1 after \"Comp1.add((v3_s, v3__e))\"\n _maint_Aggr1_add((v3_s, v3__e))\n # End maint Aggr1 after \"Comp1.add((v3_s, v3__e))\"\n\ndef _maint_Comp1__M_remove(_e):\n # Iterate {(v4_s, v4__e) : v4_s in _U_Comp1, (v4_s, v4__e) in deltamatch(Comp1_d_M, 'bb', _e, 1), (v4_s, v4__e) in 
Comp1_d_M}\n (v4_s, v4__e) = _e\n if (v4_s in _U_Comp1):\n if ((v4_s, v4__e) in Comp1_d_M):\n # Begin maint Aggr1 before \"Comp1.remove((v4_s, v4__e))\"\n _maint_Aggr1_remove((v4_s, v4__e))\n # End maint Aggr1 before \"Comp1.remove((v4_s, v4__e))\"\n # Begin maint _m_Comp1_out before \"Comp1.remove((v4_s, v4__e))\"\n _maint__m_Comp1_out_remove((v4_s, v4__e))\n # End maint _m_Comp1_out before \"Comp1.remove((v4_s, v4__e))\"\n\n_U_Comp1 = RCSet()\n_UEXT_Comp1 = Set()\ndef demand_Comp1(s):\n '{(s, _e) : s in _U_Comp1, (s, _e) in _M}'\n if (s not in _U_Comp1):\n _U_Comp1.add(s)\n # Begin maint Comp1_Ts after \"_U_Comp1.add(s)\"\n _maint_Comp1_Ts__U_Comp1_add(s)\n # End maint Comp1_Ts after \"_U_Comp1.add(s)\"\n # Begin maint Comp1 after \"_U_Comp1.add(s)\"\n _maint_Comp1__U_Comp1_add(s)\n # End maint Comp1 after \"_U_Comp1.add(s)\"\n else:\n _U_Comp1.incref(s)\n\ndef undemand_Comp1(s):\n '{(s, _e) : s in _U_Comp1, (s, _e) in _M}'\n if (_U_Comp1.getref(s) == 1):\n # Begin maint Comp1 before \"_U_Comp1.remove(s)\"\n _maint_Comp1__U_Comp1_remove(s)\n # End maint Comp1 before \"_U_Comp1.remove(s)\"\n # Begin maint Comp1_Ts before \"_U_Comp1.remove(s)\"\n _maint_Comp1_Ts__U_Comp1_remove(s)\n # End maint Comp1_Ts before \"_U_Comp1.remove(s)\"\n _U_Comp1.remove(s)\n else:\n _U_Comp1.decref(s)\n\ndef query_Comp1(s):\n '{(s, _e) : s in _U_Comp1, (s, _e) in _M}'\n if (s not in _UEXT_Comp1):\n _UEXT_Comp1.add(s)\n demand_Comp1(s)\n return True\n\nr = Set()\nt = Set()\no = Obj()\no.f = t\n# Begin maint Comp12_d_F_f after \"_F_f.add((o, t))\"\n_maint_Comp12_d_F_f__F_f_add((o, t))\n# End maint Comp12_d_F_f after \"_F_f.add((o, t))\"\n# Begin maint Comp12 after \"_F_f.add((o, t))\"\n_maint_Comp12__F_f_add((o, t))\n# End maint Comp12 after \"_F_f.add((o, t))\"\nfor x in [1, 2, 3, 4, 5]:\n r.add(x)\n # Begin maint Comp12_d_M after \"_M.add((r, x))\"\n _maint_Comp12_d_M__M_add((r, x))\n # End maint Comp12_d_M after \"_M.add((r, x))\"\n # Begin maint Comp12 after \"_M.add((r, x))\"\n _maint_Comp12__M_add((r, x))\n # End maint Comp12 after \"_M.add((r, x))\"\n # Begin maint Comp1_d_M after \"_M.add((r, x))\"\n _maint_Comp1_d_M__M_add((r, x))\n # End maint Comp1_d_M after \"_M.add((r, x))\"\n # Begin maint Comp1 after \"_M.add((r, x))\"\n _maint_Comp1__M_add((r, x))\n # End maint Comp1 after \"_M.add((r, x))\"\n t.add(x)\n # Begin maint Comp12_d_M after \"_M.add((t, x))\"\n _maint_Comp12_d_M__M_add((t, x))\n # End maint Comp12_d_M after \"_M.add((t, x))\"\n # Begin maint Comp12 after \"_M.add((t, x))\"\n _maint_Comp12__M_add((t, x))\n # End maint Comp12 after \"_M.add((t, x))\"\n # Begin maint Comp1_d_M after \"_M.add((t, x))\"\n _maint_Comp1_d_M__M_add((t, x))\n # End maint Comp1_d_M after \"_M.add((t, x))\"\n # Begin maint Comp1 after \"_M.add((t, x))\"\n _maint_Comp1__M_add((t, x))\n # End maint Comp1 after \"_M.add((t, x))\"\n# Begin maint Comp1 before \"_M.remove((r, 5))\"\n_maint_Comp1__M_remove((r, 5))\n# End maint Comp1 before \"_M.remove((r, 5))\"\n# Begin maint Comp1_d_M before \"_M.remove((r, 5))\"\n_maint_Comp1_d_M__M_remove((r, 5))\n# End maint Comp1_d_M before \"_M.remove((r, 5))\"\n# Begin maint Comp12 before \"_M.remove((r, 5))\"\n_maint_Comp12__M_remove((r, 5))\n# End maint Comp12 before \"_M.remove((r, 5))\"\n# Begin maint Comp12_d_M before \"_M.remove((r, 5))\"\n_maint_Comp12_d_M__M_remove((r, 5))\n# End maint Comp12_d_M before \"_M.remove((r, 5))\"\nr.remove(5)\ns = r\nprint((query_Aggr1(s) and _m_Aggr1_out.singlelookup(s)))\nprint((query_Aggr2(o) and _m_Aggr2_out.singlelookup(o)))" }, { 
"alpha_fraction": 0.2942386865615845, "alphanum_fraction": 0.2942386865615845, "avg_line_length": 33.71428680419922, "blob_id": "523fde59576fcd5f6f13105e178d8333ff436f37", "content_id": "ef8d86e89cbc5ea818b9aa1e7064ffdd52986ec7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 486, "license_type": "no_license", "max_line_length": 79, "num_lines": 14, "path": "/incoq/compiler/comp/__init__.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "###############################################################################\n# incoq.comp #\n# Author: Jon Brandvein #\n###############################################################################\n\n\"\"\"Relational set comprehensions and joins.\"\"\"\n\n\n# Exports.\nfrom .order import *\nfrom .clause import *\nfrom .join import *\nfrom .compspec import *\nfrom .comptrans import *\n" }, { "alpha_fraction": 0.576441764831543, "alphanum_fraction": 0.576441764831543, "avg_line_length": 34.009525299072266, "blob_id": "305b95cd484e332cacfb27d59f2a39fce0d9a2ac", "content_id": "9687925b718271b7ddcc8382cb6e46100742abdf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3676, "license_type": "no_license", "max_line_length": 74, "num_lines": 105, "path": "/incoq/compiler/incast/error.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Exceptions that support source code printing.\"\"\"\n\n\n__all__ = [\n 'ProgramError',\n 'format_exception_with_ast',\n 'print_exc_with_ast',\n]\n\n\nimport sys\nimport traceback\n\n\nclass ProgramError(Exception):\n \n \"\"\"Problem in the program being transformed. Generally syntactic\n in nature, but may be detected during the middle of transformation.\n \"\"\"\n \n @classmethod\n def ts(cls, tree):\n \"\"\"Unparse method to use for AST.\"\"\"\n # Use the unparser in __init__.py. We can't use\n # structconv.unparse_structast() because the tree\n # may be an IncAST, not a PyAST.\n from . import ts\n return ts(tree)\n \n def __init__(self, *args, node=None, ast_context=None, **kargs):\n super().__init__(*args, **kargs)\n self.node = node\n self.ast_context = ast_context\n \n @classmethod\n def format_ast_context(cls, ast_context):\n \"\"\"Return a source-code representation of the program, marking\n the occurrence of the node where the problem occurred.\n \"\"\"\n # Currently we zoom out from the source of the problem,\n # up the ast_context stack, printing more and more code\n # to give context.\n #\n # A better solution may be to print the whole program and\n # enclose the offending node with something like triple\n # angled brackets. But this requires more sophistication\n # in how we reconstruct the location of the offending node\n # in the tree. 
The information in the node stack alone\n # is likely not enough.\n s = 'AST context (most local node last):\\n'\n for node in ast_context:\n s += '==== {} ====\\n'.format(type(node).__name__)\n s += cls.ts(node) + '\\n'\n return s\n \n @classmethod\n def format_exception(cls, *args,\n node=None, ast_context=None,\n **kargs):\n \"\"\"Like traceback.format_exception(), but also include\n AST context info if present.\n \"\"\"\n msg = traceback.format_exception(*args, **kargs)\n \n try:\n # This might fail if the tree is malformed or there\n # is a bug in our source-generating code.\n if ast_context is not None:\n msg += '\\n' + cls.format_ast_context(ast_context)\n elif node is not None:\n msg += 'Error at {} node:\\n{}'.format(\n type(node).__name__,\n cls.ts(node))\n except Exception:\n msg += 'Error while attempting to get detailed information:\\n'\n msg += traceback.format_exc(**kargs)\n \n return msg\n \n def format_self(self, *args, **kargs):\n \"\"\"As above, but pull ast context info from this exception\n instance.\n \"\"\"\n return self.format_exception(\n *args,\n node=self.node, ast_context=self.ast_context,\n **kargs)\n\ndef format_exception_with_ast(exctype, value, tb, **kargs):\n \"\"\"Like traceback.format_exception(), but return additional AST\n context info if the exception is a ProgramError.\n \"\"\"\n if isinstance(value, ProgramError):\n return value.format_self(exctype, value, tb, **kargs)\n else:\n return traceback.format_exception(exctype, value, tb, **kargs)\n\ndef print_exc_with_ast(*, file=None, **kargs):\n \"\"\"Like traceback.print_exc(), but print additional AST context\n info in case of ProgramError.\n \"\"\"\n if file is None:\n file = sys.stderr\n msg = format_exception_with_ast(*sys.exc_info(), **kargs)\n print(''.join(msg), file=file)\n" }, { "alpha_fraction": 0.33527132868766785, "alphanum_fraction": 0.33527132868766785, "avg_line_length": 38.69230651855469, "blob_id": "b045b09f37b39ad3fd91a0aab7cf02db156ab3ac", "content_id": "c3de8fe15cc8d65ad9a9f4ef258e181477717319", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 516, "license_type": "no_license", "max_line_length": 79, "num_lines": 13, "path": "/incoq/runtime/__init__.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "###############################################################################\n# runtime #\n# Author: Jon Brandvein #\n###############################################################################\n\n\"\"\"Run-time library, for implementing our language features on top of\nPython. 
It must be imported by both input programs and transformed\noutput programs.\n\"\"\"\n\n\n# Exports.\nfrom .runtimelib import *\n" }, { "alpha_fraction": 0.5276967883110046, "alphanum_fraction": 0.5685130953788757, "avg_line_length": 25.384614944458008, "blob_id": "c6d951ff5e7abf863744b46084f12c05ea758bc8", "content_id": "25ca0d9abc2bb573d490aab413f1221597a1bf15", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 343, "license_type": "no_license", "max_line_length": 81, "num_lines": 13, "path": "/experiments/other/bday/bday_in.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from incoq.runtime import *\n\nbdays = set()\nbdays.update([1, 2, 3])\nBREL = set()\nBREL.update([('jon', 1), ('bo', 1), ('annie', 2)])\n\nprint(count({b for b in bdays if count({p for (p, b2) in BREL if b2 == b}) > 1}))\n\nBREL.remove(('jon', 1))\nBREL.add(('jon', 3))\n\nprint(count({b for b in bdays if count({p for (p, b2) in BREL if b2 == b}) > 1}))\n" }, { "alpha_fraction": 0.7222222089767456, "alphanum_fraction": 0.7222222089767456, "avg_line_length": 17, "blob_id": "2942b3cce8b286f8be707ef22c95c589fe618f4e", "content_id": "d130e57d2585439ae54bc09349eacd9ffabe0bb9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 54, "license_type": "no_license", "max_line_length": 28, "num_lines": 3, "path": "/experiments/rbac/constrainedrbac/crbac_orig.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from .crbac_in import *\n\ndo_query_nodemand = do_query\n" }, { "alpha_fraction": 0.5666605234146118, "alphanum_fraction": 0.5673086047172546, "avg_line_length": 32.64797592163086, "blob_id": "8eafbce4f5d696458e1ce37378e8e46e3da4607a", "content_id": "72751c173c1ccec27718271b7ddcc8382cb6e46100742abdf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 21602, "license_type": "no_license", "max_line_length": 79, "num_lines": 642, "path": "/incoq/compiler/comp/comptrans.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Comprehension transformation.\"\"\"\n\n\n__all__ = [\n 'IncComp',\n \n 'make_inccomp',\n 'inc_relcomp_helper',\n 'inc_relcomp',\n 'inc_changetrack',\n 'impl_auxonly_relcomp',\n 'patternize_comp',\n 'depatternize_comp',\n 'patternize_all',\n 'depatternize_all',\n 'comp_inc_needs_dem',\n 'comp_isvalid',\n]\n\nimport incoq.compiler.incast as L\nfrom incoq.compiler.set import Mask\n\nfrom .clause import EnumClause\nfrom .join import Join\nfrom .compspec import make_comp_maint_code, CompSpec\n\n\ndef get_uset_params(spec, mode, explicit):\n \"\"\"Return a tuple of the parameters that make it into the U-set\n under the given mode.\n \"\"\"\n \n if mode == 'none':\n return ()\n \n elif mode == 'all':\n return spec.params\n \n elif mode == 'uncon':\n return spec.get_uncon_params()\n \n elif mode == 'explicit':\n assert set(explicit).issubset(spec.params)\n return tuple(p for p in spec.params if p in explicit)\n \n else:\n assert False\n\n\nclass IncComp:\n \n \"\"\"A comprehension along with incrementalization info.\"\"\"\n \n def __init__(self, comp, spec, name, use_uset, uset_name, uset_params,\n rc, selfjoin, maint_impl, outsideinvs, uset_lru):\n self.comp = comp\n self.spec = spec \n self.name = name\n self.use_uset = use_uset\n self.uset_name = uset_name\n self.uset_params = uset_params\n self.rc = rc\n self.selfjoin = selfjoin\n self.maint_impl = maint_impl\n self.outsideinvs = outsideinvs\n 
self.uset_lru = uset_lru\n \n self.change_tracker = False\n \n assert maint_impl in ['batch', 'auxonly']\n\n\nclass RelcompMaintainer(L.OuterMaintTransformer):\n \n \"\"\"Relational comprehension maintenance transformer.\n \n If inccomp.change_tracker is True, instead of creating code to\n maintain the result, create code to add all changes that would\n be performed, whether addition or removal, to the result set.\n This requires that reference counting not be used.\n \"\"\"\n \n def __init__(self, manager, inccomp):\n super().__init__(inccomp.outsideinvs)\n self.manager = manager\n self.inccomp = inccomp\n \n self.demnames = [cl.demname for cl in inccomp.spec.join.clauses]\n \n name = inccomp.name\n rels = self.inccomp.spec.join.rels\n self.addfuncs = {rel: '_maint_{}_{}_add'.format(name, rel)\n for rel in rels}\n self.removefuncs = {rel: '_maint_{}_{}_remove'.format(name, rel)\n for rel in rels}\n \n def process(self, tree):\n self.maint_comps = []\n tree = super().process(tree)\n return tree, self.maint_comps\n \n def visit_Module(self, node):\n resinit = L.pe('RCSet()')\n \n code = L.pc('''\n RES = RESINIT\n ''', subst={'RES': self.inccomp.name,\n 'RESINIT': resinit})\n \n for rel in self.inccomp.spec.join.rels:\n prefix1 = self.manager.namegen.next_prefix()\n prefix2 = self.manager.namegen.next_prefix()\n \n add_code, add_comps = make_comp_maint_code(\n self.inccomp.spec, self.inccomp.name,\n rel, 'add', L.pe('_e'),\n prefix1,\n maint_impl=self.inccomp.maint_impl,\n rc=self.inccomp.rc,\n selfjoin=self.inccomp.selfjoin)\n \n remove_code, remove_comps = make_comp_maint_code(\n self.inccomp.spec, self.inccomp.name,\n rel, 'remove', L.pe('_e'),\n prefix2,\n maint_impl=self.inccomp.maint_impl,\n rc=self.inccomp.rc,\n selfjoin=self.inccomp.selfjoin)\n \n self.maint_comps.extend(add_comps)\n self.maint_comps.extend(remove_comps)\n \n code += L.pc('''\n def ADDFUNC(_e):\n ADDCODE\n def REMOVEFUNC(_e):\n REMOVECODE\n ''', subst={'<def>ADDFUNC': self.addfuncs[rel],\n '<c>ADDCODE': add_code,\n '<def>REMOVEFUNC': self.removefuncs[rel],\n '<c>REMOVECODE': remove_code})\n \n vt = self.manager.vartypes\n for e in self.inccomp.spec.join.enumvars:\n if e in vt:\n vt[prefix1 + e] = vt[e]\n vt[prefix2 + e] = vt[e]\n \n node = node._replace(body=code + node.body)\n node = self.generic_visit(node)\n return node\n \n def helper(self, node, var, op, elem):\n assert op in ['add', 'remove']\n \n # Maintenance goes after addition updates and before removals,\n # except when we're using augmented code, which relies on the\n # value of the set *without* the updated element.\n after_add = self.inccomp.selfjoin != 'aug'\n is_add = op == 'add'\n \n if self.inccomp.change_tracker:\n # For change trackers, all removals turn into additions,\n # but are still run in the same spot they would have been.\n funcdict = self.addfuncs\n else:\n funcdict = self.addfuncs if is_add else self.removefuncs\n \n func = funcdict[var]\n code = L.pc('FUNC(ELEM)',\n subst={'FUNC': func,\n 'ELEM': elem})\n \n if after_add ^ is_add:\n precode = code\n postcode = ()\n else:\n precode = ()\n postcode = code\n \n # Respect outsideinvs. 
This ensures that demand invariant\n # maintenance is inserted before/after the query maintenance.\n return self.with_outer_maint(node, self.inccomp.name, L.ts(node),\n precode, postcode)\n \n def visit_SetUpdate(self, node):\n node = self.generic_visit(node)\n \n if not node.is_varupdate():\n return node\n var, op, elem = node.get_varupdate()\n if var not in self.inccomp.spec.join.rels:\n return node\n \n return self.helper(node, var, op, elem)\n\n\nclass CompReplacer(L.NodeTransformer):\n \n \"\"\"Replace comp queries with uses of their saved results.\"\"\"\n \n def __init__(self, manager, inccomp):\n super().__init__()\n self.manager = manager\n self.inccomp = inccomp\n \n def process(self, tree):\n self.manager.add_invariant(self.inccomp.name, self.inccomp)\n return super().process(tree)\n \n def get_res_code(self):\n \"\"\"Return code (expression) to lookup the result.\"\"\"\n params = self.inccomp.comp.params\n \n if len(params) > 0:\n resexp = self.inccomp.spec.resexp\n assert isinstance(resexp, L.Tuple)\n resexp_arity = len(resexp.elts)\n n_rescomponents = resexp_arity - len(params)\n \n maskstr = 'b' * len(params) + 'u' * n_rescomponents\n masknode = Mask(maskstr).make_node()\n paramsnode = L.tuplify(params)\n \n code = L.pe('''\n setmatch(RES, MASK, PARAMS)\n ''', subst={'RES': L.ln(self.inccomp.name),\n 'MASK': masknode,\n 'PARAMS': paramsnode})\n \n else:\n code = L.ln(self.inccomp.name)\n \n return code\n \n def visit_Module(self, node):\n # Recurse after adding the query function.\n # (Probably doesn't matter.)\n \n if self.inccomp.use_uset:\n name = self.inccomp.name\n lrulimit = self.inccomp.uset_lru\n demparams = self.inccomp.uset_params\n specstr = str(self.inccomp.spec)\n \n maker = L.DemfuncMaker(name, specstr, demparams, lrulimit)\n code = maker.make_alldem()\n \n node = node._replace(body=code + node.body)\n \n node = self.generic_visit(node)\n \n return node\n \n def visit_NoDemQuery(self, node):\n # If a comp is wrapped in a NoDemQuery and gets incrementalized,\n # make sure to strip the Demand node that got added around it.\n was_comp = isinstance(node.value, L.Comp)\n \n node = self.generic_visit(node)\n \n if was_comp and isinstance(node.value, L.DemQuery):\n node = node.value.value\n \n return node\n \n def visit_Comp(self, node):\n node = self.generic_visit(node)\n \n if node != self.inccomp.comp:\n return node\n \n if self.inccomp.use_uset:\n code = L.DemQuery(self.inccomp.name,\n tuple(L.ln(p) for p in self.inccomp.uset_params),\n self.get_res_code())\n else:\n code = self.get_res_code()\n \n return code\n\n\nclass AuxonlyTransformer(L.NodeTransformer):\n \n \"\"\"Implement a relcomp using clause ordering and auxiliary maps.\"\"\"\n \n # There are two kinds of translations.\n #\n # - Ordinary uses of comprehensions get replaced with a\n # call to a query function.\n #\n # - Iterations over comprehensions get replaced with a direct\n # implementation, without need of a separate query function\n # or temporary result set. This only applies if the resexp\n # syntactically matches the loop target (they are the same\n # variable or tuple of variables), and if the comp is\n # duplicate-safe. 
\n #\n # The query function is not created if there are no ordinary\n # uses to require it.\n \n def __init__(self, manager, comp, name, *, augmented):\n self.manager = manager\n self.comp = comp\n self.name = name\n self.augmented = augmented\n \n spec = CompSpec.from_comp(self.comp, manager.factory)\n self.spec = spec\n \n self.need_func = False\n \n def visit_Module(self, node):\n spec = self.spec\n \n node = self.generic_visit(node)\n \n code = L.pc('''\n result.nsadd(RESEXP)\n ''', subst={'RESEXP': spec.resexp})\n \n code = spec.join.get_code(spec.params, code,\n augmented=self.augmented)\n \n code = L.pc('''\n SPEC_STR\n result = set()\n COMPUTE\n return result\n ''', subst={'SPEC_STR': L.Str(s=str(self.spec)),\n '<c>COMPUTE': code})\n \n code = L.plainfuncdef(L.N.queryfunc(self.name), spec.params, code)\n \n if self.need_func:\n node = node._replace(body=code + node.body)\n \n return node\n \n def visit_Comp(self, node):\n node = self.generic_visit(node)\n \n if node != self.comp:\n return node\n \n self.need_func = True\n \n call = L.pe('QFUN(__ARGS)',\n subst={'QFUN': L.ln(L.N.queryfunc(self.name))})\n \n call = call._replace(args=tuple(L.ln(p)\n for p in self.comp.params))\n \n return call\n \n def visit_For(self, node):\n # Recurse only after we've handled the potential special case.\n \n if node.iter != self.comp:\n return self.generic_visit(node)\n \n spec = self.spec\n \n special_case = (\n node.orelse == () and\n spec.is_duplicate_safe and\n L.is_vartuple(node.target) and\n L.is_vartuple(spec.resexp) and\n (L.get_vartuple(node.target) == L.get_vartuple(spec.resexp) or\n (L.is_name(node.target) and L.get_name(node.target) == '_'))\n )\n if special_case:\n code = ()\n code += (L.Comment('Iterate ' + str(spec)),)\n code += spec.join.get_code(spec.params, node.body,\n augmented=self.augmented)\n return self.visit(code)\n else:\n return self.generic_visit(node)\n\n\nclass SubqueryArityFinder(L.NodeVisitor):\n \n \"\"\"Determine if all occurrences of a comprehension are as a\n subquery with an arity consistent with the result expression.\n \"\"\"\n \n class Failure(BaseException):\n pass\n \n def __init__(self, comp):\n super().__init__()\n self.comp = comp\n \n def process(self, tree):\n # Result value:\n # False: non-subquery occurrence found, or subquery\n # occurrences with inconsistent arity\n # integer: subquery occurrences only, consistent arity\n \n if isinstance(self.comp.resexp, L.Tuple):\n self.arity = len(self.comp.resexp.elts)\n else:\n self.arity = 1\n \n try:\n super().process(tree)\n except self.Failure:\n self.arity = False\n \n return self.arity\n \n def visit_Comp(self, node):\n if node == self.comp:\n raise self.Failure\n \n self.generic_visit(node)\n \n def visit_Enumerator(self, node):\n if node.iter == self.comp:\n if not L.is_vartuple(node.target):\n raise self.Failure\n arity = len(L.get_vartuple(node.target))\n if self.arity != arity:\n raise self.Failure\n return\n \n self.generic_visit(node)\n\n\ndef get_subquery_demnames(spec):\n \"\"\"For each subquery in a comp, construct an invariant definition\n for its U-set, formed as the conjunction of the enumerators to the\n left of the subquery's occurrence. Each parameter of the subquery\n must be an enumvar in one of these clauses. 
Return a list of pairs,\n each pairing a subquery's demand name with its invariant (comp spec).\n \"\"\"\n clauses = spec.join.clauses\n clauses = [cl for cl in clauses if cl.kind is cl.KIND_ENUM]\n result = []\n \n for i, cl in enumerate(clauses):\n if cl.has_demand:\n # Grab clauses to the left of this one.\n # If they too are demand clauses, unwrap them to get\n # the underlying clause.\n demclauses = clauses[:i]\n for j, demcl in enumerate(demclauses):\n if demcl.has_demand:\n demclauses[j] = demcl.cl\n \n # Make sure the demand parameters are all bound in clauses\n # to the left of here.\n boundvars = set(v for demcl in demclauses for v in demcl.enumvars)\n unboundparams = set(cl.demparams) - boundvars\n assert len(unboundparams) == 0, \\\n 'Subquery parameter(s) {} not bound in clause to left ' \\\n 'of occurrence'.format(unboundparams)\n # Construct the invariant.\n new_join = Join(demclauses, spec.join.factory, None)\n new_spec = CompSpec(new_join, L.tuplify(cl.demparams), ())\n result.append((cl.demname, new_spec))\n \n return result\n\n\ndef make_inccomp(tree, manager, comp, name, *,\n force_uset=False, outsideinvs=()):\n \"\"\"Make the IncComp structure describing how to incrementalize\n a comprehension.\n \"\"\"\n get = manager.options.get_queryopt\n uset_mode = get(comp, 'uset_mode')\n explicit = get(comp, 'uset_params')\n maint_impl = get(comp, 'maint_impl')\n \n no_rc = get(comp, 'no_rc')\n rc_elim = manager.options.get_opt('rc_elim')\n # 'rc_elim' must be enabled to do elimination,\n # even when 'no_rc' is set on the individual query.\n if rc_elim:\n if no_rc:\n rc = 'no'\n else:\n rc = 'safe'\n else:\n rc = 'yes'\n \n selfjoin_strat = manager.options.get_opt('selfjoin_strat')\n \n can_flatten = (isinstance(comp.resexp, L.Tuple) and\n SubqueryArityFinder.run(tree, comp))\n spec = CompSpec.from_comp(comp, manager.factory)\n uset_params = get_uset_params(spec, uset_mode, explicit)\n spec = spec.with_uset(L.N.uset(name), uset_params,\n force=force_uset)\n spec = spec.without_params(flat=can_flatten)\n use_uset = len(uset_params) > 0 or force_uset\n \n uset_lru = get(comp, 'uset_lru')\n if uset_lru is None:\n uset_lru = manager.options.get_opt('default_uset_lru')\n \n return IncComp(comp, spec, name, use_uset, L.N.uset(name),\n uset_params, rc, selfjoin_strat, maint_impl,\n outsideinvs, uset_lru)\n\ndef inc_relcomp_helper(tree, manager, inccomp):\n \"\"\"Incrementalize a comprehension based on an IncComp structure.\n Also return maintenance comprehensions.\n \"\"\"\n if manager.options.get_opt('verbose'):\n s = ('Incrementalizing ' + inccomp.name + ': ').ljust(45)\n s += L.ts(inccomp.comp)\n print(s)\n \n # FIXME: Is the below demand code correct when the inner query's\n # demand invariant must be reference-counted? Given that change\n # tracking doesn't handle reference counting?\n \n # Create invariants for demand sets for demand-driven subqueries.\n # This only fires if we have subqueries with demand and this outer\n # query is being transformed WITHOUT filtering. If we are being\n # transformed WITH filtering, the inner queries would have been\n # rewritten without demand first; see demand/demtrans.py.\n deminvs = get_subquery_demnames(inccomp.spec)\n # Incrementalize them. Use a delta-set for deferring the propagation\n # of demand until after the inner query's maintenance code already\n # runs. 
(The inner query has already been incrementalized.)\n for demname, demspec in deminvs:\n # Hack: OuterDemandMaintainer should be refactored to move\n # to here, to avoid this import.\n from incoq.demand.demtrans import OuterDemandMaintainer\n \n # Determine dependencies of demand invariant.\n at_rels = set(e.enumrel for e in demspec.join.clauses)\n \n deltaname = L.N.deltaset(demname)\n demcomp = demspec.to_comp({})\n # Add delta maintenance code as per the invariant.\n tree = inc_changetrack(tree, manager, demcomp, deltaname)\n # Add code (outside all other maintenance) to propagate\n # the delta changes to the actual inner query demand function.\n tree = OuterDemandMaintainer.run(\n tree, manager, deltaname,\n demname, at_rels,\n L.get_vartuple(demcomp.resexp),\n None)\n \n # Unwrap the demand clauses in the comp now that we've handled them.\n spec = inccomp.spec\n new_clauses = []\n for cl in spec.join.clauses:\n if cl.has_demand:\n cl = cl.cl\n new_clauses.append(cl)\n new_spec = spec._replace(join=spec.join._replace(clauses=new_clauses))\n inccomp.spec = new_spec\n \n tree = CompReplacer.run(tree, manager, inccomp)\n tree, comps = RelcompMaintainer.run(tree, manager, inccomp)\n \n # If this was an original query, register it with the manager.\n if 'in_original' in inccomp.comp.options:\n manager.original_queryinvs.add(inccomp.name)\n \n return tree, comps\n\ndef inc_relcomp(tree, manager, comp, name, *, outsideinvs=()):\n \"\"\"Incrementalize a comprehension.\"\"\"\n inccomp = make_inccomp(tree, manager, comp, name, outsideinvs=outsideinvs)\n tree, _comps = inc_relcomp_helper(tree, manager, inccomp)\n return tree\n\ndef inc_changetrack(tree, manager, comp, name):\n \"\"\"Generate change-tracking code.\"\"\"\n inccomp = make_inccomp(tree, manager, comp, name)\n inccomp.change_tracker = True\n tree, _comps = inc_relcomp_helper(tree, manager, inccomp)\n return tree\n\n\ndef impl_auxonly_relcomp(tree, manager, comp, name):\n if manager.options.get_opt('verbose'):\n s = ('Auxonly ' + name + ': ').ljust(45)\n s += L.ts(comp)\n print(s)\n \n augmented = manager.options.get_opt('selfjoin_strat') == 'aug'\n \n tree = AuxonlyTransformer.run(tree, manager, comp, name,\n augmented=augmented)\n return tree\n\n\ndef patternize_comp(comp, factory):\n \"\"\"Patternize a comprehension.\"\"\"\n spec = CompSpec.from_comp(comp, factory)\n spec = spec.to_pattern()\n return spec.to_comp(comp.options)\n\ndef depatternize_comp(comp, factory):\n \"\"\"Depatternize a comprehension.\"\"\"\n spec = CompSpec.from_comp(comp, factory)\n spec = spec.to_nonpattern()\n return spec.to_comp(comp.options)\n\ndef patternize_all(tree, factory):\n \"\"\"Patternize all comps in the program.\"\"\"\n class Patternizer(L.QueryMapper):\n def map_Comp(self, node):\n return patternize_comp(node, factory)\n \n return Patternizer.run(tree)\n\ndef depatternize_all(tree, factory):\n \"\"\"Depatternize all (valid) comps in the program.\"\"\"\n class Depatternizer(L.QueryMapper):\n ignore_invalid = True\n def map_Comp(self, node):\n return depatternize_comp(node, factory)\n \n return Depatternizer.run(tree)\n\n\ndef comp_inc_needs_dem(manager, comp):\n \"\"\"Given a Comp node, return whether demand is required for\n incrementalization.\n \"\"\"\n spec = CompSpec.from_comp(comp, manager.factory)\n return spec.join.has_demand\n\n\ndef comp_isvalid(manager, comp):\n \"\"\"Return whether a Comp node satisfies the syntactic requirements\n of a relational comprehension.\n \"\"\"\n try:\n CompSpec.from_comp(comp, manager.factory)\n except 
TypeError:\n return False\n return True\n" }, { "alpha_fraction": 0.4532374143600464, "alphanum_fraction": 0.49520382285118103, "avg_line_length": 23.52941131591797, "blob_id": "200c6667fb09da176e2622d37f327bcfc4c74908", "content_id": "c88245ec6f71ef762e56e7d6c7fac80fe9504ca9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 834, "license_type": "no_license", "max_line_length": 123, "num_lines": 34, "path": "/incoq/tests/programs/deminc/nested_subdem_in.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "# Like nested_in, but use join instead of tags for subquery demand.\n\nfrom incoq.runtime import *\n\nOPTIONS(\n subdem_tags = False,\n)\n\nQUERYOPTIONS(\n '{(a2, c) for (a2, b) in E for (b2, c) in E if a == a2 if b == b2}',\n impl = 'inc',\n uset_mode = 'all',\n)\n\nQUERYOPTIONS(\n '{(x, z) for (x, y) in E for (y2, z) in {(a2, c) for (a2, b) in E for (b2, c) in E if a == a2 if b == b2} if y == y2}',\n impl = 'dem',\n # Use default of uset_mode = 'uncon',\n # test that \"a\" is picked up as unconstrained.\n)\n\nE = Set()\n\nfor v1, v2 in [(1, 2), (2, 3), (3, 4), (4, 5)]:\n E.add((v1, v2))\n\ndef query(a):\n print(sorted({(x, z) for (x, y) in E\n for (y2, z) in {(a2, c) for (a2, b) in E for (b2, c) in E if a == a2 if b == b2}\n if y == y2}))\n\nquery(2)\nE.remove((1, 2))\nquery(2)\n" }, { "alpha_fraction": 0.47361624240875244, "alphanum_fraction": 0.47675275802612305, "avg_line_length": 29.621469497680664, "blob_id": "34cd2dd9084ae443f47d1bc8aaaf9fadd0e65c04", "content_id": "54f6b9020a4eafbc0e7a02f78a97580feca6fb0f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5420, "license_type": "no_license", "max_line_length": 71, "num_lines": 177, "path": "/incoq/tests/invinc/obj/test_objclause.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Unit tests for objclause.py\"\"\"\n\n\nimport unittest\n\nimport incoq.compiler.incast as L\nfrom incoq.compiler.comp import Rate\nfrom incoq.compiler.obj.objclause import *\n\n\nclass ObjClauseFactory(ObjClauseFactory_Mixin):\n typecheck = True\n\nclass ObjClauseFactory_NoTC(ObjClauseFactory):\n typecheck = False\n\n\nclass ObjClauseCase(unittest.TestCase):\n \n def test_mclause(self):\n cl = MClause('S', 'x')\n \n # Construct from expression.\n cl2 = MClause.from_expr(L.pe('(S, x) in _M'))\n self.assertEqual(cl2, cl)\n \n # AST round-trip.\n clast = cl.to_AST()\n exp_clast = L.Enumerator(L.tuplify(('S', 'x'), lval=True),\n L.ln('_M'))\n self.assertEqual(clast, exp_clast)\n cl2 = MClause.from_AST(exp_clast, ObjClauseFactory)\n self.assertEqual(cl2, cl)\n \n # Attributes.\n self.assertEqual(cl.enumlhs, ('S', 'x'))\n self.assertEqual(cl.pat_mask, (False, True))\n self.assertEqual(cl.enumvars_tagsin, ('S',))\n self.assertEqual(cl.enumvars_tagsout, ('x',))\n \n # Rate.\n rate = cl.rate([])\n self.assertEqual(rate, Rate.UNRUNNABLE)\n \n # Code.\n code = cl.get_code(['S'], L.pc('pass'))\n exp_code = L.pc('''\n if isinstance(S, Set):\n for x in S:\n pass\n ''')\n self.assertEqual(code, exp_code)\n \n # Code, no type-checks.\n cl = MClause_NoTC('S', 'x')\n code = cl.get_code(['S'], L.pc('pass'))\n exp_code = L.pc('''\n for x in S:\n pass\n ''')\n self.assertEqual(code, exp_code)\n \n def test_fclause(self):\n cl = FClause('o', 'v', 'f')\n \n # Construct from expression.\n cl2 = FClause.from_expr(L.pe('(o, v) in _F_f'))\n self.assertEqual(cl2, cl)\n \n # AST round-trip.\n clast = cl.to_AST()\n exp_clast = 
L.Enumerator(L.tuplify(('o', 'v'), lval=True),\n L.ln('_F_f'))\n self.assertEqual(clast, exp_clast)\n cl2 = FClause.from_AST(exp_clast, ObjClauseFactory)\n self.assertEqual(cl2, cl)\n \n # Attributes.\n self.assertEqual(cl.enumlhs, ('o', 'v'))\n self.assertEqual(cl.pat_mask, (False, True))\n self.assertEqual(cl.enumvars_tagsin, ('o',))\n self.assertEqual(cl.enumvars_tagsout, ('v',))\n \n # Rate.\n rate = cl.rate([])\n self.assertEqual(rate, Rate.UNRUNNABLE)\n rate = cl.rate(['o'])\n self.assertEqual(rate, Rate.CONSTANT)\n \n # Code.\n code = cl.get_code(['o'], L.pc('pass'))\n exp_code = L.pc('''\n if hasattr(o, 'f'):\n v = o.f\n pass\n ''')\n self.assertEqual(code, exp_code)\n \n # Code, no type-checks.\n cl = FClause_NoTC('o', 'v', 'f')\n code = cl.get_code(['o'], L.pc('pass'))\n exp_code = L.pc('''\n v = o.f\n pass\n ''')\n self.assertEqual(code, exp_code)\n \n def test_mapclause(self):\n cl = MapClause('m', 'k', 'v')\n \n # Construct from expression.\n cl2 = MapClause.from_expr(L.pe('(m, k, v) in _MAP'))\n self.assertEqual(cl2, cl)\n \n # AST round-trip.\n clast = cl.to_AST()\n exp_clast = L.Enumerator(L.tuplify(('m', 'k', 'v'), lval=True),\n L.ln('_MAP'))\n self.assertEqual(clast, exp_clast)\n cl2 = MapClause.from_AST(exp_clast, ObjClauseFactory)\n self.assertEqual(cl2, cl)\n \n # Attributes.\n self.assertEqual(cl.enumlhs, ('m', 'k', 'v'))\n self.assertEqual(cl.pat_mask, (False, True, True))\n self.assertEqual(cl.enumvars_tagsin, ('m',))\n self.assertEqual(cl.enumvars_tagsout, ('k', 'v'))\n \n # Rate.\n rate = cl.rate([])\n self.assertEqual(rate, Rate.UNRUNNABLE)\n \n # Code.\n \n code = cl.get_code(['m'], L.pc('pass'))\n exp_code = L.pc('''\n if isinstance(m, Map):\n for k, v in m.items():\n pass\n ''')\n self.assertEqual(code, exp_code)\n \n code = cl.get_code(['m', 'k'], L.pc('pass'))\n exp_code = L.pc('''\n if isinstance(m, Map):\n if k in m:\n v = m[k]\n pass\n ''')\n self.assertEqual(code, exp_code)\n \n # Code, no type-checks.\n cl = MapClause_NoTC('m', 'k', 'v')\n code = cl.get_code(['m'], L.pc('pass'))\n exp_code = L.pc('''\n for k, v in m.items():\n pass\n ''')\n self.assertEqual(code, exp_code)\n \n def test_objclausefactory(self):\n cl = MClause('S', 'x')\n clast = L.Enumerator(L.tuplify(['S', 'x'], lval=True),\n L.pe('_M'))\n cl2 = ObjClauseFactory.from_AST(clast)\n self.assertEqual(cl2, cl)\n \n cl = FClause_NoTC('o', 'v', 'f')\n clast = L.Enumerator(L.tuplify(['o', 'v'], lval=True),\n L.pe('_F_f'))\n cl2 = ObjClauseFactory_NoTC.from_AST(clast)\n self.assertEqual(cl2, cl)\n self.assertIsInstance(cl2, FClause_NoTC)\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.3834456503391266, "alphanum_fraction": 0.39091241359710693, "avg_line_length": 35.36423873901367, "blob_id": "722b8075eeeb6a0a977f115a40b347633d65e5fe", "content_id": "c890dec0d41a33bf641ae338e55f7d6e74242730", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10982, "license_type": "no_license", "max_line_length": 78, "num_lines": 302, "path": "/incoq/tests/invinc/comp/test_compspec.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Unit tests for compspec.py.\"\"\"\n\n\nimport unittest\n\nfrom simplestruct import Field\n\nimport incoq.compiler.incast as L\n\nfrom incoq.compiler.comp.clause import (EnumClause, ClauseFactory as CF,\n ABCStruct)\nfrom incoq.compiler.comp.join import Join\nfrom incoq.compiler.comp.compspec import *\nfrom incoq.compiler.comp.compspec import TupleTreeConstraintMaker\n\n\nclass 
CompSpecCase(unittest.TestCase):\n \n def make_spec(self, source, params, CF=CF):\n comp = L.pe('COMP({{{}}}, [], {{}})'.format(source))\n comp = comp._replace(params=tuple(params))\n return CompSpec.from_comp(comp, CF)\n \n def test_basic(self):\n cl1 = EnumClause.from_expr(L.pe('(x, y) in R'))\n cl2 = EnumClause.from_expr(L.pe('(y, z) in S'))\n spec = CompSpec(Join([cl1, cl2], CF, None), L.pe('(x, z)'), ['x'])\n \n # AST round-trip.\n comp = spec.to_comp({})\n exp_comp = L.pe('COMP({(x, z) for (x, y) in R for (y, z) in S}, '\n '[x], {})')\n self.assertEqual(comp, exp_comp)\n spec2 = CompSpec.from_comp(exp_comp, CF)\n self.assertEqual(spec, spec2)\n \n def test_duprc(self):\n spec = self.make_spec(\n '(x, y, z) for (x, y) in R for (y, z) in R', [])\n self.assertTrue(spec.is_duplicate_safe)\n \n spec = self.make_spec(\n '(x, z) for (x, y) in R for (y, z) in R', [])\n self.assertFalse(spec.is_duplicate_safe)\n \n spec = self.make_spec(\n '(x, y) for (x, y) in R for (y, _) in R', [])\n self.assertTrue(spec.is_duplicate_safe)\n \n def test_pattern(self):\n spec = self.make_spec(\n '(a, b, c) for (a, b) in R for (b, c, d, e) in S '\n 'if a == b', ['d'])\n spec = spec.to_pattern()\n exp_spec = self.make_spec(\n '(a, a, c) for (a, a) in R for (a, c, d, _) in S', ['d'])\n self.assertEqual(spec, exp_spec)\n \n spec = spec.to_nonpattern()\n exp_spec = self.make_spec(\n '(a, a, c) for (a, a_2) in R if a == a_2 '\n 'for (a_3, c, d_2, _v1) in S if a_2 == a_3 '\n 'if d == d_2', ['d'])\n self.assertEqual(spec, exp_spec)\n \n def test_without_params(self):\n orig_spec = self.make_spec(\n '(c, d) for (a, b, c, d) in R', ['a', 'b', 'z'])\n spec = orig_spec.without_params()\n exp_spec = self.make_spec(\n '(a, b, z, (c, d)) for (a, b, c, d) in R', [])\n self.assertEqual(spec, exp_spec)\n \n spec = orig_spec.without_params(flat=True)\n exp_spec = self.make_spec(\n '(a, b, z, c, d) for (a, b, c, d) in R', [])\n self.assertEqual(spec, exp_spec)\n \n def test_uset(self):\n spec = self.make_spec(\n '(a, b, c) for (a, b, c) in R', ['a', 'b'])\n spec = spec.with_uset('U', ['a'])\n exp_spec = self.make_spec(\n '(a, b, c) for a in U for (a, b, c) in R', ['a', 'b'])\n self.assertEqual(spec, exp_spec)\n \n def test_iterate_code(self):\n # Test single.\n code = for_rel_code(['a', 'b'], L.pe('R'), L.pc('pass'))\n exp_code = L.pc('''\n for a, b in R:\n pass\n ''')\n self.assertEqual(code, exp_code)\n code = for_rels_union_code(['a', 'b'], [L.pe('R')], L.pc('pass'), '_')\n self.assertEqual(code, exp_code)\n \n # Test union.\n code = for_rels_union_code(['a', 'b'], [L.pe('R'), L.pe('S')],\n L.pc('pass'), 'D')\n exp_code = L.pc('''\n D = set()\n for a, b in R:\n D.nsadd((a, b))\n for a, b in S:\n D.nsadd((a, b))\n for a, b in D:\n pass\n del D\n ''')\n self.assertEqual(code, exp_code)\n \n # Test verify disjoint union.\n code = for_rels_union_code(['a', 'b'], [L.pe('R'), L.pe('S')],\n L.pc('pass'), 'D', verify_disjoint=True)\n exp_code = L.pc('''\n D = set()\n for a, b in R:\n assert (a, b) not in D\n D.add((a, b))\n for a, b in S:\n assert (a, b) not in D\n D.add((a, b))\n for a, b in D:\n pass\n del D\n ''')\n self.assertEqual(code, exp_code)\n \n # Test disjoint union.\n code = for_rels_union_disjoint_code(\n ['a', 'b'], [L.pe('R'), L.pe('S')], L.pc('pass'))\n exp_code = L.pc('''\n for a, b in R:\n pass\n for a, b in S:\n pass\n ''')\n self.assertEqual(code, exp_code)\n \n def test_comp_maint_code(self):\n # Basic.\n \n spec = self.make_spec('(x, z) for (x, y) in R for (y, z) in R', [])\n code, comps = 
make_comp_maint_code(\n spec, 'Q', 'R', 'add', L.pe('e'), '_',\n maint_impl='auxonly', rc='safe',\n selfjoin='sub')\n \n comp1 = L.pe('''\n COMP({(_x, _y, _z) for (_x, _y) in deltamatch(R, 'bb', e, 1)\n for (_y, _z) in (R - {e})},\n [], {'impl': 'auxonly',\n '_deltarel': 'R',\n '_deltaelem': 'e',\n '_deltalhs': '(_x, _y)',\n '_deltaop': 'add'})\n ''')\n comp2 = L.pe('''\n COMP({(_x, _y, _z) for (_x, _y) in R\n for (_y, _z) in deltamatch(R, 'bb', e, 1)},\n [], {'impl': 'auxonly',\n '_deltarel': 'R',\n '_deltaelem': 'e',\n '_deltalhs': '(_y, _z)',\n '_deltaop': 'add'})\n ''')\n exp_code = L.pc('''\n for (_x, _y, _z) in COMP1:\n Q.rcadd((_x, _z))\n for (_x, _y, _z) in COMP2:\n Q.rcadd((_x, _z))\n ''', subst={'COMP1': comp1, 'COMP2': comp2})\n exp_comps = [comp1, comp2]\n \n self.assertEqual(code, exp_code)\n self.assertSequenceEqual(comps, exp_comps)\n \n # Force no refcounts, naive self-joins.\n \n spec = self.make_spec('(x, z) for (x, y) in R for (y, z) in R', [])\n code, comps = make_comp_maint_code(\n spec, 'Q', 'R', 'add', L.pe('e'), '_',\n maint_impl='auxonly', rc='no',\n selfjoin='assume_disjoint')\n \n comp1 = L.pe('''\n COMP({(_x, _y, _z) for (_x, _y) in deltamatch(R, 'bb', e, 1)\n for (_y, _z) in R},\n [], {'impl': 'auxonly',\n '_deltarel': 'R',\n '_deltaelem': 'e',\n '_deltalhs': '(_x, _y)',\n '_deltaop': 'add'})\n ''')\n comp2 = L.pe('''\n COMP({(_x, _y, _z) for (_x, _y) in R\n for (_y, _z) in deltamatch(R, 'bb', e, 1)},\n [], {'impl': 'auxonly',\n '_deltarel': 'R',\n '_deltaelem': 'e',\n '_deltalhs': '(_y, _z)',\n '_deltaop': 'add'})\n ''')\n exp_code = L.pc('''\n for (_x, _y, _z) in COMP1:\n Q.add((_x, _z))\n for (_x, _y, _z) in COMP2:\n Q.add((_x, _z))\n ''', subst={'COMP1': comp1, 'COMP2': comp2})\n exp_comps = [comp1, comp2]\n \n self.assertEqual(code, exp_code)\n self.assertSequenceEqual(comps, exp_comps)\n \n def test_ucon_params(self):\n class DummyClause(EnumClause, ABCStruct):\n lhs = Field()\n rel = Field()\n con_mask = (False, True)\n \n # Basic.\n join = Join([\n DummyClause(['x', 'y'], 'R'),\n DummyClause(['y', 'z'], 'R'),\n ], CF, None)\n spec = CompSpec(join, L.pe('(x, z)'), ['x', 'y', 'z'])\n uncons = spec.get_uncon_params()\n exp_uncons = ['x']\n self.assertSequenceEqual(uncons, exp_uncons)\n \n # Cycle.\n join = Join([\n DummyClause(['x', 'x'], 'R'),\n ], CF, None)\n spec = CompSpec(join, L.pe('x'), ['x'])\n uncons = spec.get_uncon_params()\n exp_uncons = ['x']\n self.assertSequenceEqual(uncons, exp_uncons)\n \n # Cycle with two distinct minimal sets of uncons.\n join = Join([\n DummyClause(['x', 'y'], 'R'),\n DummyClause(['y', 'x'], 'R'),\n ], CF, None)\n spec = CompSpec(join, L.pe('(x, y)'), ['x', 'y'])\n uncons = spec.get_uncon_params()\n exp_uncons = ['x']\n self.assertSequenceEqual(uncons, exp_uncons)\n \n def test_tupletreeconstrs(self):\n expr = L.pe('(a, (b, c), (d + 1, f + 1))')\n constrs = TupleTreeConstraintMaker.run(expr, 'R', '_')\n exp_constrs = {('R', ('<T>', 'R.1', 'R.2', 'R.3')),\n ('R.1', '_a'),\n ('R.2', ('<T>', 'R.2.1', 'R.2.2')),\n ('R.2.1', '_b'),\n ('R.2.2', '_c'),\n ('R.3', ('<T>', 'R.3.1', 'R.3.2'))}\n self.assertCountEqual(constrs, exp_constrs)\n \n def test_domains(self):\n spec = self.make_spec('(a, c) for (a, b, b) in R for (b, c) in S '\n 'for d in T for _ in U for (_, _) in U '\n 'if a != c', [])\n constrs = spec.get_domain_constraints('Q')\n exp_constrs = [\n ('R', ('<T>', 'R.1', 'R.2', 'R.3')),\n ('R.1', 'Q_a'),\n ('R.2', 'Q_b'),\n ('R.3', 'Q_b'),\n ('S', ('<T>', 'S.1', 'S.2')),\n ('S.1', 'Q_b'),\n ('S.2', 'Q_c'),\n ('T', 
'Q_d'),\n ('U', ('<T>', 'U.1', 'U.2')),\n ('Q', ('<T>', 'Q.1', 'Q.2')),\n ('Q.1', 'Q_a'),\n ('Q.2', 'Q_c'),\n ]\n self.assertCountEqual(constrs, exp_constrs)\n \n def test_memberships(self):\n from incoq.compiler.tup import TupClauseFactory_Mixin\n spec = self.make_spec('(a, c) for (a, b, tup) in R '\n 'for (tup, c, d) in _TUP2 '\n 'for d in T for _ in U for (_, _) in U '\n 'if a != c', [],\n CF=TupClauseFactory_Mixin)\n mapping = spec.get_membership_constraints()\n exp_mapping = {\n 'a': {'R.1'},\n 'b': {'R.2'},\n 'c': {'R.3.1'},\n 'd': {'R.3.2', 'T'},\n 'tup': {'R.3'},\n }\n self.assertCountEqual(mapping, exp_mapping)\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.7272727489471436, "alphanum_fraction": 0.7272727489471436, "avg_line_length": 17.33333396911621, "blob_id": "36dda2680b9905111df442362130286c18b5f12e", "content_id": "bc64751932b46a9bf39e0ade1f307dcb830b04b7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 55, "license_type": "no_license", "max_line_length": 28, "num_lines": 3, "path": "/experiments/django/django_orig.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from .django_in import *\n\ndo_query_nodemand = do_query\n" }, { "alpha_fraction": 0.49594882130622864, "alphanum_fraction": 0.552452027797699, "avg_line_length": 35.364341735839844, "blob_id": "abe85268aaa13c7a848a9a9adec73323ed538200", "content_id": "74fd5d1a89662ec081d95c3793f6d1767a61d65a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4690, "license_type": "no_license", "max_line_length": 75, "num_lines": 129, "path": "/incoq/tests/programs/aggr/minmax_out.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from incoq.runtime import *\n# Aggr1 := min(R, None)\n# Aggr2 := max(R, None)\n_m_Aggr1_u = Map()\ndef _maint__m_Aggr1_u_add(_e):\n v7_1 = _e\n if (() not in _m_Aggr1_u):\n _m_Aggr1_u[()] = set()\n _m_Aggr1_u[()].add(v7_1)\n\ndef _maint__m_Aggr1_u_remove(_e):\n v8_1 = _e\n _m_Aggr1_u[()].remove(v8_1)\n if (len(_m_Aggr1_u[()]) == 0):\n del _m_Aggr1_u[()]\n\n_m_Aggr2_u = Map()\ndef _maint__m_Aggr2_u_add(_e):\n v5_1 = _e\n if (() not in _m_Aggr2_u):\n _m_Aggr2_u[()] = set()\n _m_Aggr2_u[()].add(v5_1)\n\ndef _maint__m_Aggr2_u_remove(_e):\n v6_1 = _e\n _m_Aggr2_u[()].remove(v6_1)\n if (len(_m_Aggr2_u[()]) == 0):\n del _m_Aggr2_u[()]\n\ndef _maint_Aggr2_add(_e):\n v3_v1 = _e\n v3_val = _m_Aggr2_u.singlelookup((), ((Tree(), None), 0))\n (v3_state, v3_count) = v3_val\n (v3_tree, _) = v3_state\n v3_tree[v3_v1] = None\n v3_state = (v3_tree, v3_tree.__max__())\n v3_val = (v3_state, (v3_count + 1))\n if (not (len((_m_Aggr2_u[()] if (() in _m_Aggr2_u) else set())) == 0)):\n v3_elem = _m_Aggr2_u.singlelookup(())\n # Begin maint _m_Aggr2_u before \"Aggr2.remove(v3_elem)\"\n _maint__m_Aggr2_u_remove(v3_elem)\n # End maint _m_Aggr2_u before \"Aggr2.remove(v3_elem)\"\n # Begin maint _m_Aggr2_u after \"Aggr2.add(v3_val)\"\n _maint__m_Aggr2_u_add(v3_val)\n # End maint _m_Aggr2_u after \"Aggr2.add(v3_val)\"\n\ndef _maint_Aggr2_remove(_e):\n v4_v1 = _e\n v4_val = _m_Aggr2_u.singlelookup(())\n if (v4_val[1] == 1):\n v4_elem = _m_Aggr2_u.singlelookup(())\n # Begin maint _m_Aggr2_u before \"Aggr2.remove(v4_elem)\"\n _maint__m_Aggr2_u_remove(v4_elem)\n # End maint _m_Aggr2_u before \"Aggr2.remove(v4_elem)\"\n else:\n (v4_state, v4_count) = v4_val\n (v4_tree, _) = v4_state\n del v4_tree[v4_v1]\n v4_state = (v4_tree, v4_tree.__max__())\n v4_val = 
(v4_state, (v4_count - 1))\n v4_elem = _m_Aggr2_u.singlelookup(())\n # Begin maint _m_Aggr2_u before \"Aggr2.remove(v4_elem)\"\n _maint__m_Aggr2_u_remove(v4_elem)\n # End maint _m_Aggr2_u before \"Aggr2.remove(v4_elem)\"\n # Begin maint _m_Aggr2_u after \"Aggr2.add(v4_val)\"\n _maint__m_Aggr2_u_add(v4_val)\n # End maint _m_Aggr2_u after \"Aggr2.add(v4_val)\"\n\ndef _maint_Aggr1_add(_e):\n v1_v1 = _e\n v1_val = _m_Aggr1_u.singlelookup((), ((Tree(), None), 0))\n (v1_state, v1_count) = v1_val\n (v1_tree, _) = v1_state\n v1_tree[v1_v1] = None\n v1_state = (v1_tree, v1_tree.__min__())\n v1_val = (v1_state, (v1_count + 1))\n if (not (len((_m_Aggr1_u[()] if (() in _m_Aggr1_u) else set())) == 0)):\n v1_elem = _m_Aggr1_u.singlelookup(())\n # Begin maint _m_Aggr1_u before \"Aggr1.remove(v1_elem)\"\n _maint__m_Aggr1_u_remove(v1_elem)\n # End maint _m_Aggr1_u before \"Aggr1.remove(v1_elem)\"\n # Begin maint _m_Aggr1_u after \"Aggr1.add(v1_val)\"\n _maint__m_Aggr1_u_add(v1_val)\n # End maint _m_Aggr1_u after \"Aggr1.add(v1_val)\"\n\ndef _maint_Aggr1_remove(_e):\n v2_v1 = _e\n v2_val = _m_Aggr1_u.singlelookup(())\n if (v2_val[1] == 1):\n v2_elem = _m_Aggr1_u.singlelookup(())\n # Begin maint _m_Aggr1_u before \"Aggr1.remove(v2_elem)\"\n _maint__m_Aggr1_u_remove(v2_elem)\n # End maint _m_Aggr1_u before \"Aggr1.remove(v2_elem)\"\n else:\n (v2_state, v2_count) = v2_val\n (v2_tree, _) = v2_state\n del v2_tree[v2_v1]\n v2_state = (v2_tree, v2_tree.__min__())\n v2_val = (v2_state, (v2_count - 1))\n v2_elem = _m_Aggr1_u.singlelookup(())\n # Begin maint _m_Aggr1_u before \"Aggr1.remove(v2_elem)\"\n _maint__m_Aggr1_u_remove(v2_elem)\n # End maint _m_Aggr1_u before \"Aggr1.remove(v2_elem)\"\n # Begin maint _m_Aggr1_u after \"Aggr1.add(v2_val)\"\n _maint__m_Aggr1_u_add(v2_val)\n # End maint _m_Aggr1_u after \"Aggr1.add(v2_val)\"\n\nfor x in [1, 2, 3, 4, 5]:\n # Begin maint Aggr2 after \"R.add(x)\"\n _maint_Aggr2_add(x)\n # End maint Aggr2 after \"R.add(x)\"\n # Begin maint Aggr1 after \"R.add(x)\"\n _maint_Aggr1_add(x)\n # End maint Aggr1 after \"R.add(x)\"\n# Begin maint Aggr1 before \"R.remove(5)\"\n_maint_Aggr1_remove(5)\n# End maint Aggr1 before \"R.remove(5)\"\n# Begin maint Aggr2 before \"R.remove(5)\"\n_maint_Aggr2_remove(5)\n# End maint Aggr2 before \"R.remove(5)\"\nprint(_m_Aggr1_u.singlelookup((), ((Tree(), None), 0))[0][1])\nprint(_m_Aggr2_u.singlelookup((), ((Tree(), None), 0))[0][1])\nfor x in [1, 2, 3, 4]:\n # Begin maint Aggr1 before \"R.remove(x)\"\n _maint_Aggr1_remove(x)\n # End maint Aggr1 before \"R.remove(x)\"\n # Begin maint Aggr2 before \"R.remove(x)\"\n _maint_Aggr2_remove(x)\n # End maint Aggr2 before \"R.remove(x)\"" }, { "alpha_fraction": 0.5706352591514587, "alphanum_fraction": 0.571190595626831, "avg_line_length": 34.730159759521484, "blob_id": "469d38350654343f6a4a755d37e516ec65e557dd", "content_id": "99939820b99749f4ef25aa711e65b42291a87888", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 18008, "license_type": "no_license", "max_line_length": 77, "num_lines": 504, "path": "/incoq/compiler/cost/analyze.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Asymptotic cost analysis of program fragments.\"\"\"\n\n\n__all__ = [\n 'CostAnalyzer',\n 'func_costs',\n 'CostLabelAdder',\n 'analyze_costs',\n]\n\n\nimport incoq.compiler.incast as L\nfrom incoq.compiler.set import Mask\nfrom incoq.compiler.aggr import IncAggr\n\nfrom .cost import *\n\n\nclass CostAnalyzer(L.NodeVisitor):\n \n \"\"\"Determine a cost for 
a chunk of code. The cost is expressed in\n terms of the given args. Costs for function calls appearing in the\n code are determined using argmap and costmap.\n \"\"\"\n \n # Most nodes either take constant time or the sum of the costs\n # of their children. We list these nodes below. The remaining\n # ones have visitor handlers.\n #\n # For conciseness, the lists may use a non-terminal node type\n # instead of all of its terminal node types, e.g. expr_context\n # instead of Load, Store, etc.\n #\n # Lists are by name, and programmatically replaced by nodes\n # after.\n \n const_nodes = [\n 'FunctionDef', 'ClassDef', 'Import', 'ImportFrom',\n 'Global', 'Nonlocal', 'Pass', 'Break', 'Continue',\n 'Lambda', 'Yield', 'YieldFrom', 'Num', 'Str', 'Bytes',\n 'NameConstant', 'Ellipsis', 'Name', 'NameConstant',\n \n 'expr_context', 'boolop', 'operator', 'unaryop', 'cmpop',\n 'alias',\n \n 'Comment',\n \n 'NOptions', 'QOptions', 'DeltaMatch',\n ]\n \n sum_nodes = [\n 'Return', 'Delete', 'Assign', 'AugAssign', 'If', 'With',\n 'Raise', 'Try', 'Assert', 'Expr',\n 'BoolOp', 'BinOp', 'UnaryOp', 'IfExp', 'Dict', 'Set',\n 'Compare', 'Attribute', 'Subscript', 'Starred',\n 'List', 'Tuple',\n \n 'slice', 'excepthandler', 'arguments', 'arg', 'keyword',\n 'withitem',\n \n 'Maintenance', 'SetUpdate', 'RCSetRefUpdate', 'IsEmpty',\n 'GetRef', 'AssignKey', 'DelKey', 'Lookup', 'ImgLookup',\n 'RCImgLookup', 'SMLookup', 'NoDemQuery',\n ]\n \n const_nodes = tuple(getattr(L, n) for n in const_nodes)\n sum_nodes = tuple(getattr(L, n) for n in sum_nodes)\n \n # We also have lists of known constant-time built-in functions\n # and methods.\n \n const_funcs = [\n 'set', 'Set', 'Obj', 'Tree',\n 'isinstance', 'hasattr',\n 'next', 'iter',\n 'globals',\n 'len', # constant because when it's used in generated code,\n # it's used for checking tuple arity, not aggregate\n # queries\n 'max2', 'min2', # constant because they're used on a\n # fixed number of arguments\n ]\n \n const_meths = [\n '__min__', '__max__', 'singlelookup',\n 'elements', # Note that this is the cost of invoking the method,\n # not iterating over its result.\n 'peek', 'ping',\n ]\n \n def __init__(self, args, argmap, costmap, *, warn=False):\n super().__init__()\n self.args = args\n self.argmap = argmap\n self.costmap = costmap\n self.warn = warn\n \n def WarnUnknownCost(self, node):\n \"\"\"Return UnknownCost, but also print the node that led to it\n if self.warn is True.\n \"\"\"\n if self.warn:\n print('---- Unknown cost ---- ' + str(L.ts(node)))\n return UnknownCost()\n \n def WarnNameCost(self, node, name):\n \"\"\"As above, but return a NameCost to act as a placeholder\n instead.\n \"\"\"\n unname = 'UNKNOWN_' + name\n if self.warn:\n print('---- Unknown cost ---- ' + str(L.ts(node)) +\n ' (using placeholder ' + unname + ')')\n return NameCost(unname)\n \n def process(self, tree):\n res = super().process(tree)\n res = Simplifier.run(res)\n res = normalize(res)\n return res\n \n def visit(self, tree):\n # Primitive values in the tree are considered to have unit cost.\n if isinstance(tree, L.AST):\n res = self.node_visit(tree)\n elif isinstance(tree, tuple):\n res = self.seq_visit(tree)\n else:\n res = UnitCost()\n \n return res\n \n def generic_visit(self, node):\n # Dispatch based on the lists above.\n if isinstance(node, self.const_nodes):\n return UnitCost()\n \n elif isinstance(node, self.sum_nodes):\n costs = []\n for field in node._fields:\n value = getattr(node, field)\n cost = self.visit(value)\n costs.append(cost)\n return SumCost(costs)\n \n 
else:\n raise AssertionError('Unhandled node type: ' +\n type(node).__name__)\n \n def seq_visit(self, seq):\n # Sum up the costs of a sequence of nodes.\n costs = []\n for item in seq:\n cost = self.visit(item)\n costs.append(cost)\n return SumCost(costs)\n \n def expr_tosizecost(self, expr):\n \"\"\"Turn an iterated expression into a cost bound for its\n cardinality.\n \"\"\"\n if isinstance(expr, L.Name):\n return NameCost(expr.id)\n \n # Catch case of iterating over a delta set.\n # We'll just say O(delta set), even though it can have\n # duplicates.\n elif (isinstance(expr, L.Call) and\n isinstance(expr.func, L.Attribute) and\n isinstance(expr.func.value, L.Name) and\n expr.func.attr == 'elements'):\n return NameCost(expr.func.value.id)\n \n elif isinstance(expr, L.SetMatch):\n if isinstance(expr.target, (L.Set, L.DeltaMatch)):\n return UnitCost()\n elif (isinstance(expr.target, L.Name) and\n L.is_vartuple(expr.key)):\n keys = L.get_vartuple(expr.key)\n if all(k in self.args for k in keys):\n return DefImgsetCost(expr.target.id, Mask(expr.mask),\n L.get_vartuple(expr.key))\n else:\n return IndefImgsetCost(expr.target.id, Mask(expr.mask))\n else:\n return self.WarnUnknownCost(expr)\n \n elif isinstance(expr, L.DeltaMatch):\n return UnitCost()\n \n elif isinstance(expr, (L.Set, L.List, L.Tuple, L.Dict)):\n return UnitCost()\n \n else:\n return self.WarnUnknownCost(expr)\n \n def visit_For(self, node):\n # For loops are the one-time cost of evaluating the iter,\n # plus the product of the number of times repeated (size\n # of iter) with the cost of the body and evaluating the\n # target, plus the cost of the else branch.\n targetcost = self.visit(node.target)\n itercost = self.visit(node.iter)\n repeatcost = self.expr_tosizecost(node.iter)\n bodycost = self.visit(node.body)\n orelsecost = self.visit(node.orelse)\n loopcost = ProductCost([repeatcost, SumCost([bodycost, targetcost])])\n return SumCost([itercost, loopcost, orelsecost])\n \n def visit_While(self, node):\n # While loops run an unknown number of times.\n # Their cost is the product of an unknown with the\n # body cost, plus the else branch.\n bodycost = self.visit(node.body)\n orelsecost = self.visit(node.orelse)\n loopcost = ProductCost([UnknownCost(), bodycost])\n return SumCost([loopcost, orelsecost])\n \n def comp_helper(self, node):\n # Comprehensions and their variants are the products of all\n # generators and the result expression.\n gencosts = [self.visit(g) for g in node.generators]\n eltcost = self.visit(node.elt)\n return ProductCost(gencosts + [eltcost])\n \n visit_ListComp = comp_helper\n visit_SetComp = comp_helper\n \n def visit_DictComp(self, node):\n gencosts = [self.visit(g) for g in node.generators]\n eltcost = SumCost([self.visit(node.key), self.visit(node.value)])\n return ProductCost(gencosts + [eltcost])\n \n visit_GeneratorExp = comp_helper\n \n def visit_comprehension(self, node):\n itercost = self.visit(node.iter)\n targetcost = self.visit(node.target)\n ifcost = self.visit(node.ifs)\n repeatcost = self.expr_tosizecost(node.iter)\n loopcost = ProductCost([SumCost([targetcost, ifcost]), repeatcost])\n return SumCost([itercost, loopcost])\n \n # IncAST-specific nodes.\n \n def visit_MacroUpdate(self, node):\n # Cost of evaluating each side, plus cost of size of each side.\n leftcost = self.visit(node.target)\n leftsize = self.expr_tosizecost(node.target)\n if node.other is None:\n rightcost = rightsize = UnitCost()\n else:\n rightcost = self.visit(node.other)\n rightsize = 
self.expr_tosizecost(node.other)\n return SumCost([leftcost, leftsize, rightcost, rightsize])\n \n def visit_DemQuery(self, node):\n # Translate into a call to the demand function. Cost is\n # that plus the result retrieval cost.\n callnode = L.Call(L.ln(L.N.queryfunc(node.demname)), node.args,\n (), None, None)\n callcost = self.visit(callnode)\n retrievecost = self.visit(node.value)\n return SumCost([callcost, retrievecost])\n \n def visit_SetMatch(self, node):\n # Cost of evaluating. Size doesn't matter since the image\n # set can be returned in constant time via auxiliary map.\n return SumCost([self.visit(node.target), self.visit(node.key)])\n \n def visit_Enumerator(self, node):\n itercost = self.visit(node.iter)\n targetcost = self.visit(node.target)\n repeatcost = self.expr_tosizecost(node.iter)\n loopcost = ProductCost([targetcost, repeatcost])\n return SumCost([itercost, loopcost])\n \n def visit_Comp(self, node):\n # Cost based on a left-to-right evaluation order.\n # We look at the clauses rightmost first and multiply\n # or add its cost depending on whether it is an\n # Enumerator or condition.\n cost = self.visit(node.resexp)\n for cl in node.clauses:\n clcost = self.visit(cl)\n if isinstance(cl, L.Enumerator):\n cost = ProductCost([clcost, cost])\n else:\n cost = SumCost([clcost, cost])\n return cost\n \n def visit_Aggregate(self, node):\n # Evaluation cost plus size cost.\n evalcost = self.visit(node.value)\n sizecost = self.expr_tosizecost(node.value)\n return SumCost([evalcost, sizecost])\n \n # The all-powerful Call case.\n \n def visit_Call(self, node):\n # If the function is incoq.runtime-qualified, strip it\n # so we can match const_funcs.\n if (isinstance(node.func, L.Attribute) and\n isinstance(node.func.value, L.Attribute) and\n node.func.value.attr == 'runtime' and\n isinstance(node.func.value.value, L.Name) and\n node.func.value.value.id == 'incoq'):\n call = node._replace(func=L.ln(node.func.attr))\n else:\n call = node\n \n # Certain calls of methods are recognized\n # as constant.\n if (isinstance(call.func, L.Attribute) and\n call.func.attr in self.const_meths):\n return UnitCost()\n \n # Non-plain function calls are unknown.\n if not L.is_plaincall(call):\n return self.WarnUnknownCost(node)\n name, args = L.get_plaincall(call)\n \n # Some known built-in constant functions are recognized.\n if name in self.const_funcs:\n return UnitCost()\n \n # If the name doesn't appear in argmap and costmap, it's\n # unknown. But return a NameCost representing it anyway\n # so more info can be added later.\n if not (name in self.argmap and name in self.costmap):\n return self.WarnNameCost(node, name)\n \n formals = self.argmap[name]\n assert len(args) == len(formals), \\\n 'Function call args ({}) don\\'t match definition ({})'.format(\n len(args), len(formals))\n \n # The call's actual arguments are preserved in the returned cost\n # only if they are simple variables and they are parameters to\n # the caller's own function. 
Otherwise they get simplified away.\n formalsub = []\n for a in args:\n if isinstance(a, L.Name) and a.id in self.args:\n formalsub.append(a.id)\n else:\n formalsub.append(None)\n subst = dict(zip(formals, formalsub))\n \n cost = ImgkeySubstitutor.run(self.costmap[name], subst)\n return cost\n\n\ndef func_costs(tree, *, warn=False):\n funcs = L.PlainFunctionFinder.run(tree, stmt_only=False)\n \n param_map, body_map, _edges, order = L.FunctionInfoGetter.run(\n tree, funcs, require_nonrecursive=True)\n \n cost_map = {}\n for f in order:\n body = body_map[f]\n cost = CostAnalyzer.run(body, param_map[f], param_map, cost_map,\n warn=warn)\n cost_map[f] = cost\n \n return cost_map\n\n\nclass CostLabelAdder(L.NodeTransformer):\n \n \"\"\"Add a cost comment to each function that we have info for.\"\"\"\n \n def __init__(self, costmap):\n super().__init__()\n self.costmap = costmap\n \n def visit_FunctionDef(self, node):\n cost = self.costmap.get(node.name, None)\n if cost is None:\n return node\n else:\n pretty = PrettyPrinter.run(cost)\n header = (L.Comment('Cost: O({})'.format(pretty)),)\n return node._replace(body=header + node.body)\n\n\ndef type_to_cost(t, pathcosts=None, path=()):\n \"\"\"Turn a type into a cost term. Usually this is based on the\n domain size of the type, i.e. the number of possible values\n for the type.\n \n If path and pathcosts are given, they are used to possibly\n override this with a custom cost. path is a tuple of indices\n indicating an access path in a tuple tree value. pathcosts is\n a mapping from such paths to the cost to return.\n \"\"\"\n if pathcosts is not None and path in pathcosts:\n return pathcosts[path]\n \n if t in [L.toptype, L.bottomtype]:\n return UnknownCost()\n elif isinstance(t, L.PrimitiveType):\n return NameCost(t.t.__name__)\n elif isinstance(t, L.TupleType):\n return ProductCost([type_to_cost(et, pathcosts, path + (i,))\n for i, et in enumerate(t.ets)])\n elif isinstance(t, (L.ObjType, L.RefineType)):\n return NameCost(t.name)\n elif isinstance(t, L.EnumType):\n return UnitCost()\n else:\n return UnknownCost()\n\n\nclass VarRewriter(CostTransformer):\n \n \"\"\"Rewrite cost terms by replacing costs based on variable names\n with costs based on their types.\n \n If usets_constant is True, also eliminate U-set terms as constant.\n \"\"\"\n \n def __init__(self, manager, *, usets_constant=False):\n super().__init__()\n self.manager = manager\n self.usets_constant = usets_constant\n \n def process(self, tree):\n res = super().process(tree)\n res = Simplifier.run(res)\n res = normalize(res)\n return res\n \n def visit_NameCost(self, cost):\n rel = cost.name\n \n if self.usets_constant and rel.startswith('_U_'):\n return UnitCost()\n \n t = self.manager.vartypes.get(rel, None)\n if not isinstance(t, L.SetType):\n return cost\n \n c = type_to_cost(t.et)\n if isinstance(c, UnknownCost):\n return cost\n return c\n \n def visit_IndefImgsetCost(self, cost):\n rel = cost.rel\n \n if self.usets_constant and rel.startswith('_U_'):\n return UnitCost()\n \n t = self.manager.vartypes.get(rel, None)\n if not isinstance(t, L.SetType):\n return cost\n if not (isinstance(t.et, L.TupleType) and\n len(cost.mask) == len(t.et.ets)):\n return cost\n \n # Get indices and types of unbound parts.\n ets = [(i, et) for i, et in enumerate(t.et.ets)\n if cost.mask.parts[i] == 'u']\n \n # Special case for aggregates: If the keys have no\n # wildcards, the result component is functionally\n # determined and can therefore be omitted from the cost.\n aggr_names = [name for 
name, inv in self.manager.invariants.items()\n if isinstance(inv, IncAggr)]\n if (rel in aggr_names and \n 'w' not in cost.mask.parts[:-1]):\n ets = [(i, et) for i, et in ets\n if i != len(cost.mask.parts) - 1]\n \n # The index i stuff is leftover from when domcosts were used.\n ecosts = [type_to_cost(et) for i, et in ets]\n if any(isinstance(c, UnknownCost) for c in ecosts):\n return cost\n return ProductCost(ecosts)\n \n def visit_DefImgsetCost(self, cost):\n return self.visit_IndefImgsetCost(cost)\n\n\ndef analyze_costs(manager, tree, *, rewrite_types=False,\n verbose=False, warn=False):\n \"\"\"Analyze function costs. Return a tree modified by adding cost\n annotations and a dictionary of these costs.\n \"\"\"\n usets_constant = manager.options.get_opt('default_uset_lru') is not None\n costmap = func_costs(tree, warn=warn)\n \n if rewrite_types:\n for k in costmap:\n c1 = costmap[k]\n costmap[k] = VarRewriter.run(costmap[k], manager,\n usets_constant=usets_constant)\n c2 = costmap[k]\n if verbose and c1 != c2:\n print('{} ---> {}'.format(c1, c2))\n \n tree = CostLabelAdder.run(tree, costmap)\n return tree, costmap\n" }, { "alpha_fraction": 0.7153846025466919, "alphanum_fraction": 0.7153846025466919, "avg_line_length": 17.571428298950195, "blob_id": "79c6c856200264b0d302fe8e6dc86a36c38ac091", "content_id": "d3ff6263928eece023a025b09b86faf486015d24", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 130, "license_type": "no_license", "max_line_length": 50, "num_lines": 7, "path": "/incoq/transform/__init__.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Utilities for scripting invocation of incoq.\"\"\"\n\n\n# Exports.\nfrom .statsdb import *\nfrom .schema import *\nfrom .trans import *\n" }, { "alpha_fraction": 0.5873146653175354, "alphanum_fraction": 0.5889621376991272, "avg_line_length": 23.775510787963867, "blob_id": "b13b07b75896dcef9aa2fdd59e8a876279b73c6f", "content_id": "f53e7eabe76438663b19411ab35bf7c7d368ca69", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1214, "license_type": "no_license", "max_line_length": 68, "num_lines": 49, "path": "/experiments/twitter/twitter_tom.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "# This example DOES NOT WORK. The current OSQ system implementation\n# does not generate clauses over U, so the computed maintenance join\n# is actually incorrect. In this example, this manifests as a\n# reference count error. 
\n\nfrom incoq.runtime import *\nfrom osq import query\n\ndef make_user(email, loc):\n u = Obj()\n u.followers = Set()\n u.email = email\n u.loc = loc\n return u\n\ndef make_group():\n g = Set()\n return g\n\ndef follow(u, c):\n assert u not in c.followers\n c.followers.add(u)\n\ndef unfollow(u, c):\n assert u in c.followers\n c.followers.remove(u)\n\ndef join_group(u, g):\n assert u not in g\n g.add(u)\n\ndef leave_group(u, g):\n assert u in g\n g.remove(u)\n\ndef change_loc(u, loc):\n # In the original program we do \"del u.loc\" for strictness,\n # but that's not necessary or allowed in Tom's system.\n u.loc = loc\n\ndef do_query(celeb, group):\n return query('celeb, group -> '\n '{user.email for user in celeb.followers '\n 'for user2 in group '\n 'if user.loc == \"NYC\" '\n 'if user is user2}',\n celeb, group)\n\ndo_query_nodemand = do_query\n" }, { "alpha_fraction": 0.43273380398750305, "alphanum_fraction": 0.4972422122955322, "avg_line_length": 38.71904754638672, "blob_id": "c03284eb20b628f01fb9bb4cc7dd903690c19bbc", "content_id": "55564347a2d9e032376fd8297ff7a94b0e183e20", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8340, "license_type": "no_license", "max_line_length": 140, "num_lines": 210, "path": "/incoq/tests/programs/deminc/wildcard_out.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from incoq.runtime import *\n# Comp1 := {(a, b) : _ in _U_Comp1, (a, b) in R, (b, _) in R}\n# Comp1_Tb1 := {b : (a, b) in R}\n# Comp1_dR2 := {(b, _v1) : b in Comp1_Tb1, (b, _v1) in R}\n_m_Comp1_out = Map()\ndef _maint__m_Comp1_out_add(_e):\n (v21_1, v21_2) = _e\n if (v21_1 not in _m_Comp1_out):\n _m_Comp1_out[v21_1] = set()\n _m_Comp1_out[v21_1].add(v21_2)\n\ndef _maint__m_Comp1_out_remove(_e):\n (v22_1, v22_2) = _e\n _m_Comp1_out[v22_1].remove(v22_2)\n if (len(_m_Comp1_out[v22_1]) == 0):\n del _m_Comp1_out[v22_1]\n\n_m_R_in = Map()\ndef _maint__m_R_in_add(_e):\n (v19_1, v19_2) = _e\n if (v19_2 not in _m_R_in):\n _m_R_in[v19_2] = set()\n _m_R_in[v19_2].add(v19_1)\n\n_m_Comp1_dR2_bw = Map()\ndef _maint__m_Comp1_dR2_bw_add(_e):\n (v17_1, v17_2) = _e\n if (v17_1 not in _m_Comp1_dR2_bw):\n _m_Comp1_dR2_bw[v17_1] = RCSet()\n if (() not in _m_Comp1_dR2_bw[v17_1]):\n _m_Comp1_dR2_bw[v17_1].add(())\n else:\n _m_Comp1_dR2_bw[v17_1].incref(())\n\ndef _maint__m_Comp1_dR2_bw_remove(_e):\n (v18_1, v18_2) = _e\n if (_m_Comp1_dR2_bw[v18_1].getref(()) == 1):\n _m_Comp1_dR2_bw[v18_1].remove(())\n else:\n _m_Comp1_dR2_bw[v18_1].decref(())\n if (len(_m_Comp1_dR2_bw[v18_1]) == 0):\n del _m_Comp1_dR2_bw[v18_1]\n\n_m_R_bw = Map()\ndef _maint__m_R_bw_add(_e):\n (v15_1, v15_2) = _e\n if (v15_1 not in _m_R_bw):\n _m_R_bw[v15_1] = RCSet()\n if (() not in _m_R_bw[v15_1]):\n _m_R_bw[v15_1].add(())\n else:\n _m_R_bw[v15_1].incref(())\n\n_m__U_Comp1_w = Map()\ndef _maint__m__U_Comp1_w_add(_e):\n if (() not in _m__U_Comp1_w):\n _m__U_Comp1_w[()] = RCSet()\n if (() not in _m__U_Comp1_w[()]):\n _m__U_Comp1_w[()].add(())\n else:\n _m__U_Comp1_w[()].incref(())\n\ndef _maint__m__U_Comp1_w_remove(_e):\n if (_m__U_Comp1_w[()].getref(()) == 1):\n _m__U_Comp1_w[()].remove(())\n else:\n _m__U_Comp1_w[()].decref(())\n if (len(_m__U_Comp1_w[()]) == 0):\n del _m__U_Comp1_w[()]\n\n_m_R_out = Map()\ndef _maint__m_R_out_add(_e):\n (v11_1, v11_2) = _e\n if (v11_1 not in _m_R_out):\n _m_R_out[v11_1] = set()\n _m_R_out[v11_1].add(v11_2)\n\ndef _maint_Comp1_dR2_Comp1_Tb1_add(_e):\n # Iterate {(v7_b, v7__v1) : v7_b in deltamatch(Comp1_Tb1, 'b', _e, 1), (v7_b, v7__v1) 
in R}\n v7_b = _e\n for v7__v1 in (_m_R_out[v7_b] if (v7_b in _m_R_out) else set()):\n # Begin maint _m_Comp1_dR2_bw after \"Comp1_dR2.add((v7_b, v7__v1))\"\n _maint__m_Comp1_dR2_bw_add((v7_b, v7__v1))\n # End maint _m_Comp1_dR2_bw after \"Comp1_dR2.add((v7_b, v7__v1))\"\n\ndef _maint_Comp1_dR2_Comp1_Tb1_remove(_e):\n # Iterate {(v8_b, v8__v1) : v8_b in deltamatch(Comp1_Tb1, 'b', _e, 1), (v8_b, v8__v1) in R}\n v8_b = _e\n for v8__v1 in (_m_R_out[v8_b] if (v8_b in _m_R_out) else set()):\n # Begin maint _m_Comp1_dR2_bw before \"Comp1_dR2.remove((v8_b, v8__v1))\"\n _maint__m_Comp1_dR2_bw_remove((v8_b, v8__v1))\n # End maint _m_Comp1_dR2_bw before \"Comp1_dR2.remove((v8_b, v8__v1))\"\n\ndef _maint_Comp1_dR2_R_add(_e):\n # Iterate {(v9_b, v9__v1) : v9_b in Comp1_Tb1, (v9_b, v9__v1) in deltamatch(R, 'bb', _e, 1)}\n (v9_b, v9__v1) = _e\n if (v9_b in Comp1_Tb1):\n # Begin maint _m_Comp1_dR2_bw after \"Comp1_dR2.add((v9_b, v9__v1))\"\n _maint__m_Comp1_dR2_bw_add((v9_b, v9__v1))\n # End maint _m_Comp1_dR2_bw after \"Comp1_dR2.add((v9_b, v9__v1))\"\n\nComp1_Tb1 = RCSet()\ndef _maint_Comp1_Tb1_R_add(_e):\n # Iterate {(v5_a, v5_b) : (v5_a, v5_b) in deltamatch(R, 'bb', _e, 1)}\n (v5_a, v5_b) = _e\n if (v5_b not in Comp1_Tb1):\n Comp1_Tb1.add(v5_b)\n # Begin maint Comp1_dR2 after \"Comp1_Tb1.add(v5_b)\"\n _maint_Comp1_dR2_Comp1_Tb1_add(v5_b)\n # End maint Comp1_dR2 after \"Comp1_Tb1.add(v5_b)\"\n else:\n Comp1_Tb1.incref(v5_b)\n\ndef _maint_Comp1__U_Comp1_add(_e):\n # Iterate {(v1_a, v1_b) : _ in deltamatch(_U_Comp1, 'w', _e, 1), (v1_a, v1_b) in R, (v1_b, _) in R}\n for _ in setmatch(({_e} if ((_m__U_Comp1_w[()] if (() in _m__U_Comp1_w) else RCSet()).getref(()) == 1) else {}), 'w', ()):\n for (v1_a, v1_b) in R:\n for _ in (_m_R_bw[v1_b] if (v1_b in _m_R_bw) else RCSet()):\n # Begin maint _m_Comp1_out after \"Comp1.add((v1_a, v1_b))\"\n _maint__m_Comp1_out_add((v1_a, v1_b))\n # End maint _m_Comp1_out after \"Comp1.add((v1_a, v1_b))\"\n\ndef _maint_Comp1__U_Comp1_remove(_e):\n # Iterate {(v2_a, v2_b) : _ in deltamatch(_U_Comp1, 'w', _e, 1), (v2_a, v2_b) in R, (v2_b, _) in R}\n for _ in setmatch(({_e} if ((_m__U_Comp1_w[()] if (() in _m__U_Comp1_w) else RCSet()).getref(()) == 1) else {}), 'w', ()):\n for (v2_a, v2_b) in R:\n for _ in (_m_R_bw[v2_b] if (v2_b in _m_R_bw) else RCSet()):\n # Begin maint _m_Comp1_out before \"Comp1.remove((v2_a, v2_b))\"\n _maint__m_Comp1_out_remove((v2_a, v2_b))\n # End maint _m_Comp1_out before \"Comp1.remove((v2_a, v2_b))\"\n\ndef _maint_Comp1_R_add(_e):\n v3_DAS = set()\n # Iterate {(v3_a, v3_b) : _ in _U_Comp1, (v3_a, v3_b) in deltamatch(R, 'bb', _e, 1), (v3_b, _) in R}\n (v3_a, v3_b) = _e\n for _ in (_m_R_bw[v3_b] if (v3_b in _m_R_bw) else RCSet()):\n for _ in (_m__U_Comp1_w[()] if (() in _m__U_Comp1_w) else RCSet()):\n if ((v3_a, v3_b) not in v3_DAS):\n v3_DAS.add((v3_a, v3_b))\n # Iterate {(v3_a, v3_b) : _ in _U_Comp1, (v3_a, v3_b) in R, (v3_b, _) in deltamatch(Comp1_dR2, 'bw', _e, 1), (v3_b, _) in Comp1_dR2}\n for v3_b in setmatch(({_e} if ((_m_Comp1_dR2_bw[_e[0]] if (_e[0] in _m_Comp1_dR2_bw) else RCSet()).getref(()) == 1) else {}), 'uw', ()):\n for v3_a in (_m_R_in[v3_b] if (v3_b in _m_R_in) else set()):\n for _ in (_m_Comp1_dR2_bw[v3_b] if (v3_b in _m_Comp1_dR2_bw) else RCSet()):\n for _ in (_m__U_Comp1_w[()] if (() in _m__U_Comp1_w) else RCSet()):\n if ((v3_a, v3_b) not in v3_DAS):\n v3_DAS.add((v3_a, v3_b))\n for (v3_a, v3_b) in v3_DAS:\n # Begin maint _m_Comp1_out after \"Comp1.add((v3_a, v3_b))\"\n _maint__m_Comp1_out_add((v3_a, v3_b))\n # End maint _m_Comp1_out 
after \"Comp1.add((v3_a, v3_b))\"\n del v3_DAS\n\n_U_Comp1 = RCSet()\n_UEXT_Comp1 = Set()\ndef demand_Comp1():\n '{(a, b) : _ in _U_Comp1, (a, b) in R, (b, _) in R}'\n if (() not in _U_Comp1):\n _U_Comp1.add(())\n # Begin maint _m__U_Comp1_w after \"_U_Comp1.add(())\"\n _maint__m__U_Comp1_w_add(())\n # End maint _m__U_Comp1_w after \"_U_Comp1.add(())\"\n # Begin maint Comp1 after \"_U_Comp1.add(())\"\n _maint_Comp1__U_Comp1_add(())\n # End maint Comp1 after \"_U_Comp1.add(())\"\n else:\n _U_Comp1.incref(())\n\ndef undemand_Comp1():\n '{(a, b) : _ in _U_Comp1, (a, b) in R, (b, _) in R}'\n if (_U_Comp1.getref(()) == 1):\n # Begin maint Comp1 before \"_U_Comp1.remove(())\"\n _maint_Comp1__U_Comp1_remove(())\n # End maint Comp1 before \"_U_Comp1.remove(())\"\n # Begin maint _m__U_Comp1_w before \"_U_Comp1.remove(())\"\n _maint__m__U_Comp1_w_remove(())\n # End maint _m__U_Comp1_w before \"_U_Comp1.remove(())\"\n _U_Comp1.remove(())\n else:\n _U_Comp1.decref(())\n\ndef query_Comp1():\n '{(a, b) : _ in _U_Comp1, (a, b) in R, (b, _) in R}'\n if (() not in _UEXT_Comp1):\n _UEXT_Comp1.add(())\n demand_Comp1()\n return True\n\nR = Set()\nfor (x, y) in [(1, 2), (2, 3), (3, 4)]:\n R.add((x, y))\n # Begin maint _m_R_in after \"R.add((x, y))\"\n _maint__m_R_in_add((x, y))\n # End maint _m_R_in after \"R.add((x, y))\"\n # Begin maint _m_R_bw after \"R.add((x, y))\"\n _maint__m_R_bw_add((x, y))\n # End maint _m_R_bw after \"R.add((x, y))\"\n # Begin maint _m_R_out after \"R.add((x, y))\"\n _maint__m_R_out_add((x, y))\n # End maint _m_R_out after \"R.add((x, y))\"\n # Begin maint Comp1_dR2 after \"R.add((x, y))\"\n _maint_Comp1_dR2_R_add((x, y))\n # End maint Comp1_dR2 after \"R.add((x, y))\"\n # Begin maint Comp1_Tb1 after \"R.add((x, y))\"\n _maint_Comp1_Tb1_R_add((x, y))\n # End maint Comp1_Tb1 after \"R.add((x, y))\"\n # Begin maint Comp1 after \"R.add((x, y))\"\n _maint_Comp1_R_add((x, y))\n # End maint Comp1 after \"R.add((x, y))\"\na = 1\nprint(sorted((query_Comp1() and (_m_Comp1_out[a] if (a in _m_Comp1_out) else set()))))" }, { "alpha_fraction": 0.5055432319641113, "alphanum_fraction": 0.534368097782135, "avg_line_length": 19.5, "blob_id": "1a93bf5242604ee4f565e3498e63d621a13703f1", "content_id": "6ff5e473cf62a569fd3cdfc174f6404a592c8da8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 451, "license_type": "no_license", "max_line_length": 72, "num_lines": 22, "path": "/incoq/tests/programs/comp/uset/nodemand_in.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "# Test comprehension occurrences that don't try to add\n# to the demand set.\n\nfrom incoq.runtime import *\n\nQUERYOPTIONS(\n '{z for (x, y) in E for (y2, z) in E if y == y2}',\n params = ['x'],\n uset_mode = 'all',\n impl = 'inc',\n)\n\nE = Set()\n\nfor a, b in {(1, 2), (2, 3), (2, 4)}:\n E.add((a, b))\n\nx = 1\n\nprint(sorted({z for (x, y) in E for (y2, z) in E if y == y2}))\n\nprint(sorted(NODEMAND({z for (x, y) in E for (y2, z) in E if y == y2})))\n" }, { "alpha_fraction": 0.6732673048973083, "alphanum_fraction": 0.6732673048973083, "avg_line_length": 13.428571701049805, "blob_id": "6df936ac053f8fb022aaf6793732a204f9990593", "content_id": "aff6c8b862117df83d7a6598b3d0d5c6db57fb46", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 101, "license_type": "no_license", "max_line_length": 23, "num_lines": 7, "path": "/incoq/compiler/cost/__init__.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", 
"text": "\"\"\"Cost analysis.\"\"\"\n\n\n# Exports.\nfrom .cost import *\nfrom .analyze import *\nfrom .interact import *\n" }, { "alpha_fraction": 0.5019454956054688, "alphanum_fraction": 0.5447470545768738, "avg_line_length": 15.580645561218262, "blob_id": "df34b580e2a5a4054cb80665087dcfd991bd855a", "content_id": "c94bc21335085630650a017869dad19302b6baf5", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "Python", "length_bytes": 1028, "license_type": "no_license", "max_line_length": 60, "num_lines": 62, "path": "/experiments/cache/timetest.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "import sys\nimport gc\nimport pickle\nfrom time import process_time as pt\nfrom random import randrange\n\ngc.disable()\n\n\nn_ops = 100000\nn_trials = 10\nmax_x = 20000\n\n\nclass Obj:\n pass\n\n\ndef run(x, reps):\n objs = [Obj() for _ in range(max_x)]\n col = [(i, (objs[randrange(x)],)) for i in range(n_ops)]\n \n t1 = pt()\n for _ in range(reps):\n for (i, (o,)) in col:\n pass\n t2 = pt()\n return t2 - t1\n\n\n# Timing.\n\ndef avgrun(x, reps):\n print('-- ' + str(x) + ' --')\n times = []\n for _ in range(n_trials):\n r = run(x, reps)\n print(format(r, '.3f'))\n r /= n_ops * reps\n times.append(r)\n print()\n return sum(times) / len(times)\n\n\nxs = (list(range(250, 5001, 250)) + \n list(range(6000, 20001, 1000)))\n\n\npoints = []\nfor x in xs:\n y = avgrun(x, 500)\n points.append((x, y))\n\n\nxs, ys = zip(*points)\nprint(xs)\nprint(ys)\n\nwith open('timetest_out.pickle', 'wb') as f:\n pickle.dump(xs, f)\n pickle.dump(ys, f)\n print('Wrote out timetest_out.pickle.')\n" }, { "alpha_fraction": 0.5451072454452515, "alphanum_fraction": 0.5463396310806274, "avg_line_length": 25.6907901763916, "blob_id": "275cc8842bae75e1cd233dcc09627b6c54a2872e", "content_id": "c61876c8e4cb300feef544482dc850f5d72b84ef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8114, "license_type": "no_license", "max_line_length": 77, "num_lines": 304, "path": "/incoq/compiler/obj/objclause.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "\"\"\"Comprehension clauses over M/F-sets.\"\"\"\n\n\n__all__ = [\n 'MClause',\n 'MClause_NoTC',\n 'FClause',\n 'FClause_NoTC',\n 'MapClause',\n 'MapClause_NoTC',\n 'ObjClauseFactory_Mixin',\n]\n\n\nfrom simplestruct.type import checktype\nfrom simplestruct import Field\n\nimport incoq.compiler.incast as L\nfrom incoq.compiler.set import Mask\nfrom incoq.compiler.comp import (ClauseFactory, Rate, EnumClause)\nfrom incoq.compiler.comp.clause import ABCStruct\n\nfrom .match import mset_bindmatch, fset_bindmatch, mapset_bindmatch\nfrom .pairrel import (is_mrel, make_mrel, is_frel, get_frel_field, make_frel,\n is_maprel, make_maprel)\n\n\nclass MClause(EnumClause, ABCStruct):\n \n \"\"\"An enumerator over the M-set.\"\"\"\n \n cont = Field(str)\n item = Field(str)\n \n pat_mask = (False, True)\n con_mask = (False, True)\n tagsin_mask = (True, False)\n tagsout_mask = (False, True)\n \n def get_domain_constrs(self, prefix):\n return ()\n \n def get_membership_constrs(self):\n return ()\n \n typecheck = True\n \n @classmethod\n def from_expr(cls, node):\n \"\"\"Construct from a membership expression\n \n (<var>, <var>) in _M\n \"\"\"\n checktype(node, L.AST)\n \n left, op, right = L.get_cmp(node)\n checktype(op, L.In)\n lhs = L.get_vartuple(left)\n assert len(lhs) == 2\n cont, item = lhs\n rel = L.get_name(right)\n assert is_mrel(rel)\n return cls(cont, item)\n \n @classmethod\n 
def from_AST(cls, node, factory):\n \"\"\"Construct from Enumerator node of form\n \n (<var>, <var>) in _M\n \"\"\"\n checktype(node, L.Enumerator)\n \n lhs = L.get_vartuple(node.target)\n rel = L.get_name(node.iter)\n if not len(lhs) == 2:\n raise TypeError\n cont, item = lhs\n if not is_mrel(rel):\n raise TypeError\n return cls(cont, item)\n \n def __init__(self, cont, item):\n self.lhs = (cont, item)\n self.rel = make_mrel()\n super().__init__(self.lhs, self.rel)\n \n def rate(self, bindenv):\n mask = Mask.from_vars(self.lhs, bindenv)\n if mask.is_allunbound:\n return Rate.UNRUNNABLE\n return super().rate(bindenv)\n \n def get_code(self, bindenv, body):\n mask = Mask.from_vars(self.lhs, bindenv)\n bvars, uvars, _eqs = mask.split_vars(self.lhs)\n return mset_bindmatch(mask, bvars, uvars, body,\n typecheck=self.typecheck)\n\nclass MClause_NoTC(MClause):\n \n \"\"\"MClause without type checks in emitted code.\"\"\"\n \n cont = Field(str)\n item = Field(str)\n \n typecheck = False\n\n\nclass FClause(EnumClause, ABCStruct):\n \n \"\"\"An enumerator over an F-set.\"\"\"\n \n cont = Field(str)\n item = Field(str)\n field = Field(str)\n \n pat_mask = (False, True)\n con_mask = (False, True)\n tagsin_mask = (True, False)\n tagsout_mask = (False, True)\n \n def get_domain_constrs(self, prefix):\n return ()\n \n def get_membership_constrs(self):\n return ()\n \n typecheck = True\n \n @classmethod\n def from_expr(cls, node):\n \"\"\"Construct from a membership expression\n \n (<var>, <var>) in _F_<field>\n \"\"\"\n checktype(node, L.AST)\n \n left, op, right = L.get_cmp(node)\n checktype(op, L.In)\n lhs = L.get_vartuple(left)\n assert len(lhs) == 2\n cont, item = lhs\n rel = L.get_name(right)\n assert is_frel(rel)\n field = get_frel_field(rel)\n return cls(cont, item, field)\n \n @classmethod\n def from_AST(cls, node, factory):\n \"\"\"Construct from Enumerator node of form\n \n (<var>, <var>) in _F_<field>\n \"\"\"\n checktype(node, L.Enumerator)\n \n lhs = L.get_vartuple(node.target)\n rel = L.get_name(node.iter)\n if not len(lhs) == 2:\n raise TypeError\n cont, item = lhs\n if not is_frel(rel):\n raise TypeError\n field = get_frel_field(rel)\n return cls(cont, item, field)\n \n def __init__(self, cont, item, field):\n self.lhs = (cont, item)\n self.rel = make_frel(field)\n super().__init__(self.lhs, self.rel)\n \n def rate(self, bindenv):\n mask = Mask.from_vars(self.lhs, bindenv)\n if mask.is_allunbound:\n return Rate.UNRUNNABLE\n elif mask == Mask.OUT:\n return Rate.CONSTANT\n return super().rate(bindenv)\n \n def get_determined_vars(self, bindenv):\n if self.cont in bindenv and self.item != '_':\n return (self.item,)\n else:\n return ()\n \n def get_code(self, bindenv, body):\n mask = Mask.from_vars(self.lhs, bindenv)\n bvars, uvars, _eqs = mask.split_vars(self.lhs)\n return fset_bindmatch(self.field, mask, bvars, uvars, body,\n typecheck=self.typecheck)\n\nclass FClause_NoTC(FClause):\n \n \"\"\"FClause without type checks in emitted code.\"\"\"\n \n cont = Field(str)\n item = Field(str)\n field = Field(str)\n \n typecheck = False\n\n\nclass MapClause(EnumClause, ABCStruct):\n \n \"\"\"An enumerator over the MAP set.\"\"\"\n \n map = Field(str)\n key = Field(str)\n value = Field(str)\n \n pat_mask = (False, True, True)\n con_mask = (False, True, True)\n tagsin_mask = (True, False, False)\n tagsout_mask = (False, True, True)\n \n def get_domain_constrs(self, prefix):\n return ()\n \n def get_membership_constrs(self):\n return ()\n \n typecheck = True\n \n @classmethod\n def from_expr(cls, 
node):\n \"\"\"Construct from a membership expression\n \n (<var>, <var>, <var>) in _MAP\n \"\"\"\n checktype(node, L.AST)\n \n left, op, right = L.get_cmp(node)\n checktype(op, L.In)\n lhs = L.get_vartuple(left)\n assert len(lhs) == 3\n map, key, value = lhs\n rel = L.get_name(right)\n assert is_maprel(rel)\n return cls(map, key, value)\n \n @classmethod\n def from_AST(cls, node, factory):\n \"\"\"Construct from Enumerator node of form\n \n (<var>, <var>, <var>) in _MAP\n \"\"\"\n checktype(node, L.Enumerator)\n \n lhs = L.get_vartuple(node.target)\n rel = L.get_name(node.iter)\n if not len(lhs) == 3:\n raise TypeError\n map, key, value = lhs\n if not is_maprel(rel):\n raise TypeError\n return cls(map, key, value)\n \n def __init__(self, map, key, value):\n self.lhs = (map, key, value)\n self.rel = make_maprel()\n super().__init__(self.lhs, self.rel)\n \n def rate(self, bindenv):\n mask = Mask.from_vars(self.lhs, bindenv)\n if mask.is_allunbound:\n return Rate.UNRUNNABLE\n elif (mask.parts[0] == 'b' and\n (mask.parts[1] == 'b' or mask.parts[1] == '1')):\n return Rate.CONSTANT\n return super().rate(bindenv)\n \n def get_code(self, bindenv, body):\n mask = Mask.from_vars(self.lhs, bindenv)\n bvars, uvars, _eqs = mask.split_vars(self.lhs)\n return mapset_bindmatch(mask, bvars, uvars, body,\n typecheck=self.typecheck)\n\nclass MapClause_NoTC(MapClause):\n \n \"\"\"MapClause without type checks in emitted code.\"\"\"\n \n map = Field(str)\n key = Field(str)\n value = Field(str)\n \n typecheck = False\n\n\nclass ObjClauseFactory_Mixin(ClauseFactory):\n \n \"\"\"Factory that's aware of object clauses.\"\"\"\n \n objdomain = True\n \"\"\"Hook for disabling object domain clauses.\"\"\"\n \n @classmethod\n def get_clause_kinds(cls):\n if cls.objdomain:\n if cls.typecheck:\n pair_clauses = [MClause, FClause, MapClause]\n else:\n pair_clauses = [MClause_NoTC, FClause_NoTC, MapClause_NoTC]\n else:\n pair_clauses = []\n return pair_clauses + super().get_clause_kinds()\n" }, { "alpha_fraction": 0.3939688801765442, "alphanum_fraction": 0.46238651871681213, "avg_line_length": 35.72618865966797, "blob_id": "2ee244a3b8d26c8f83eda6e3d53613884dc1c0fb", "content_id": "41c6d073fadfd426cea7a42e9d5d7f816522e47a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3084, "license_type": "no_license", "max_line_length": 134, "num_lines": 84, "path": "/incoq/tests/programs/comp/implmode_out.py", "repo_name": "InvInc/invinc", "src_encoding": "UTF-8", "text": "from incoq.runtime import *\n# Comp2 := {(x, z) : (x, y) in E, (y, z) in E, (z > 3)}\n# Comp3 := {(x, z) : (x, y) in E, (y, z) in E, (z > 4)}\n_m_E_in = Map()\ndef _maint__m_E_in_add(_e):\n (v7_1, v7_2) = _e\n if (v7_2 not in _m_E_in):\n _m_E_in[v7_2] = set()\n _m_E_in[v7_2].add(v7_1)\n\n_m_E_out = Map()\ndef _maint__m_E_out_add(_e):\n (v5_1, v5_2) = _e\n if (v5_1 not in _m_E_out):\n _m_E_out[v5_1] = set()\n _m_E_out[v5_1].add(v5_2)\n\nComp3 = RCSet()\ndef _maint_Comp3_E_add(_e):\n v3_DAS = set()\n # Iterate {(v3_x, v3_y, v3_z) : (v3_x, v3_y) in deltamatch(E, 'bb', _e, 1), (v3_y, v3_z) in E, (v3_z > 4)}\n (v3_x, v3_y) = _e\n for v3_z in (_m_E_out[v3_y] if (v3_y in _m_E_out) else set()):\n if (v3_z > 4):\n if ((v3_x, v3_y, v3_z) not in v3_DAS):\n v3_DAS.add((v3_x, v3_y, v3_z))\n # Iterate {(v3_x, v3_y, v3_z) : (v3_x, v3_y) in E, (v3_y, v3_z) in deltamatch(E, 'bb', _e, 1), (v3_z > 4)}\n (v3_y, v3_z) = _e\n if (v3_z > 4):\n for v3_x in (_m_E_in[v3_y] if (v3_y in _m_E_in) else set()):\n if ((v3_x, v3_y, v3_z) not 
in v3_DAS):\n v3_DAS.add((v3_x, v3_y, v3_z))\n for (v3_x, v3_y, v3_z) in v3_DAS:\n if ((v3_x, v3_z) not in Comp3):\n Comp3.add((v3_x, v3_z))\n else:\n Comp3.incref((v3_x, v3_z))\n del v3_DAS\n\nComp2 = RCSet()\ndef _maint_Comp2_E_add(_e):\n v1_DAS = set()\n for (v1_x, v1_y, v1_z) in {(v1_x, v1_y, v1_z) for (v1_x, v1_y) in {_e} for (v1_y_2, v1_z) in E if (v1_y == v1_y_2) if (v1_z > 3)}:\n if ((v1_x, v1_y, v1_z) not in v1_DAS):\n v1_DAS.add((v1_x, v1_y, v1_z))\n for (v1_x, v1_y, v1_z) in {(v1_x, v1_y, v1_z) for (v1_x, v1_y) in E for (v1_y_2, v1_z) in {_e} if (v1_y == v1_y_2) if (v1_z > 3)}:\n if ((v1_x, v1_y, v1_z) not in v1_DAS):\n v1_DAS.add((v1_x, v1_y, v1_z))\n for (v1_x, v1_y, v1_z) in v1_DAS:\n if ((v1_x, v1_z) not in Comp2):\n Comp2.add((v1_x, v1_z))\n else:\n Comp2.incref((v1_x, v1_z))\n del v1_DAS\n\ndef query_Comp1():\n '{(x, z) : (x, y) in E, (y, z) in E, (z > 2)}'\n result = set()\n for (x, y) in E:\n for z in (_m_E_out[y] if (y in _m_E_out) else set()):\n if (z > 2):\n if ((x, z) not in result):\n result.add((x, z))\n return result\n\nE = Set()\nfor (v1, v2) in {(1, 2), (2, 3), (3, 4), (4, 5)}:\n E.add((v1, v2))\n # Begin maint _m_E_in after \"E.add((v1, v2))\"\n _maint__m_E_in_add((v1, v2))\n # End maint _m_E_in after \"E.add((v1, v2))\"\n # Begin maint _m_E_out after \"E.add((v1, v2))\"\n _maint__m_E_out_add((v1, v2))\n # End maint _m_E_out after \"E.add((v1, v2))\"\n # Begin maint Comp3 after \"E.add((v1, v2))\"\n _maint_Comp3_E_add((v1, v2))\n # End maint Comp3 after \"E.add((v1, v2))\"\n # Begin maint Comp2 after \"E.add((v1, v2))\"\n _maint_Comp2_E_add((v1, v2))\n # End maint Comp2 after \"E.add((v1, v2))\"\nprint(sorted({(x, z) for (x, y) in E for (y2, z) in E if (y == y2) if (z > 1)}))\nprint(sorted(query_Comp1()))\nprint(sorted(Comp2))\nprint(sorted(Comp3))" } ]
278
limichange/atlas-restore
https://github.com/limichange/atlas-restore
29e07de56a2023b3fa6a81b8ff1973372e942778
adda5133518513fc67de7100ed98fa369fff36f9
012c60a931d5387806ae63bd5aaaf03b0c3d1fd8
refs/heads/master
2020-04-23T01:23:59.282618
2019-02-15T06:12:46
2019-02-15T06:12:46
170,811,054
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6040268540382385, "alphanum_fraction": 0.6174496412277222, "avg_line_length": 10.461538314819336, "blob_id": "1f7feded0cb97453bc587b9fff02b88e21c4c158", "content_id": "503ae1b647c230260b63828ecb3044242ddb5507", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 233, "license_type": "permissive", "max_line_length": 31, "num_lines": 13, "path": "/README.md", "repo_name": "limichange/atlas-restore", "src_encoding": "UTF-8", "text": "# altas 游戏图集批量还原脚本\n\n### 运行环境\n- Python 2.7\n- PIL\n\n### 使用方法\n\n```\n# python main.py '还原文件文件夹'\npython main.py ./atlas\n```\n> 注意 : `atlas文件`和`图片文件`需在同一个目录下\n" }, { "alpha_fraction": 0.5238479971885681, "alphanum_fraction": 0.5254648327827454, "avg_line_length": 22.339622497558594, "blob_id": "e85be420d13f68398c4cac8c9c7ecef34ac8f53b", "content_id": "2dfbd6c31fc5077ffcfab34e4b2da80dd0344528", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1237, "license_type": "permissive", "max_line_length": 53, "num_lines": 53, "path": "/main.py", "repo_name": "limichange/atlas-restore", "src_encoding": "UTF-8", "text": "# coding=utf-8\nimport json\nimport os\nimport sys\nimport shutil\nfrom PIL import Image\nfrom os import walk\n\n# read finder url\ndirName = sys.argv[1]\n\n# read all files\nf = []\nfor (dirpath, dirnames, filenames) in walk(dirName):\n f.extend(filenames)\n break\n\nprint f\n\n# filter atlas files\nfor filename in f:\n [name, type] = filename.split('.')\n if type == 'atlas':\n print '\\n'\n\n atlas = open(dirName + '/' + filename, 'r')\n data = json.loads(atlas.read())\n image_name = 'atlas/' + data['meta']['image']\n path = 'dist/' + data['meta']['prefix']\n print image_name\n print path\n\n # delete path\n if os.path.exists(path):\n shutil.rmtree(path)\n\n # create path\n os.makedirs(path)\n\n # splice image\n im = Image.open(image_name)\n frames = data['frames']\n\n for frame in frames:\n frame_info = frames[frame]\n x = frame_info['frame']['x']\n y = frame_info['frame']['y']\n w = frame_info['frame']['w']\n h = frame_info['frame']['h']\n print frame, x, y, w, h\n box = (x, y, x + w, y + h)\n region = im.crop(box)\n region.save(path + frame)\n" } ]
2
k-vinogradov/prefix-updater
https://github.com/k-vinogradov/prefix-updater
7ec32a4aa778b6996c05c548d2d28a3e465e0306
869c00564567723e268e5df38a296bfced5089ce
9733ab1f13c4d370940f369e48400357affbd4c7
refs/heads/master
2021-01-22T11:58:19.624412
2015-10-14T06:59:28
2015-10-14T06:59:28
30,956,865
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.27557632327079773, "alphanum_fraction": 0.5401536822319031, "avg_line_length": 22.963899612426758, "blob_id": "ebec680ec0afd7a9f0105a0788cacc724b47988f", "content_id": "d3bc300b51dc8ce2b3cfeff54782f4f56146b711", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6637, "license_type": "no_license", "max_line_length": 106, "num_lines": 277, "path": "/update.py", "repo_name": "k-vinogradov/prefix-updater", "src_encoding": "UTF-8", "text": "import urllib2\nimport re\nimport os\nfrom datetime import datetime\nfrom ipcalc import Network\n\nACL_NAME = '115'\n\nOWN_NETWORKS = (\n '31.216.160.0/20',\n '46.166.64.0/18',\n '46.182.128.0/21',\n '80.240.32.0/20',\n #'89.105.128.0/21',\n '93.188.208.0/21',\n #'178.169.0.0/18',\n '100.64.0.0/10',\n)\n\nADDITIONAL_NETWORKS = (\n '172.17.200.160/27',\n '91.230.210.0/23',\n '84.22.159.128/25',\n '84.22.140.64/27',)\n\nCDN_NETWORKS = (\n '5.8.176.0/24',\n '5.61.16.0/21',\n '5.61.232.0/21',\n '31.148.222.0/24',\n '37.230.152.0/24',\n '37.230.153.0/24',\n '37.230.155.0/24',\n '37.230.240.0/24',\n '44.188.128.0/20',\n '77.74.24.0/21',\n '87.237.40.0/21',\n '87.240.128.0/18',\n '91.195.124.0/23',\n '91.206.202.0/23',\n '91.212.151.0/24',\n '91.217.50.0/23',\n '91.223.15.0/24',\n '91.225.236.0/23',\n '91.225.236.0/22',\n '91.225.238.0/23',\n '92.242.32.0/24',\n '92.242.34.0/24',\n '92.242.35.0/24',\n '92.242.36.0/24',\n '92.242.37.0/24',\n '92.242.38.0/24',\n '92.242.39.0/24',\n '93.186.224.0/21',\n '93.186.232.0/21',\n '94.100.176.0/20',\n '94.100.186.0/23',\n '94.101.96.0/23',\n '94.101.98.0/23',\n '94.101.100.0/23',\n '94.101.102.0/23',\n '94.127.152.0/23',\n '94.127.153.0/24',\n '95.131.24.0/24',\n '95.131.24.0/21',\n '95.131.25.0/24',\n '95.131.26.0/24',\n '95.131.27.0/24',\n '95.131.28.0/24',\n '95.131.29.0/24',\n '95.131.30.0/24',\n '95.131.31.0/24',\n '95.142.192.0/21',\n '95.142.200.0/21',\n '95.142.201.128/26',\n '95.163.32.0/19',\n '95.213.0.0/18',\n '109.73.15.0/24',\n '128.140.168.0/21',\n '128.140.170.0/24',\n '130.193.64.0/21',\n '130.193.65.0/24',\n '130.193.66.0/24',\n '130.193.67.0/24',\n '130.193.68.0/24',\n '141.101.241.0/24',\n '151.236.105.0/24',\n '178.20.232.0/21',\n '178.20.236.0/24',\n '178.21.8.0/21',\n '178.22.88.0/21',\n '178.22.90.0/24',\n '178.22.90.0/23',\n '178.22.91.0/24',\n '178.22.92.0/23',\n '178.159.248.0/21',\n '178.237.16.0/20',\n '185.3.140.0/24',\n '185.3.141.0/24',\n '185.3.142.0/24',\n '185.3.143.0/24',\n '185.5.136.0/22',\n '185.6.244.0/22',\n '185.16.244.0/23',\n '185.16.246.0/24',\n '185.16.247.0/24',\n '185.32.248.0/22',\n '185.37.48.0/22',\n '185.42.12.0/22',\n '188.93.56.0/21',\n '188.93.59.0/24',\n '188.93.60.0/24',\n '188.93.63.0/24',\n '188.93.208.0/23',\n '188.93.208.0/21',\n '188.93.213.0/24',\n '188.93.214.0/24',\n '188.93.215.0/24',\n '193.0.170.0/23',\n '194.165.24.0/23',\n '195.42.96.0/23',\n '195.114.104.0/23',\n '195.211.20.0/22',\n '195.211.128.0/23',\n '195.211.128.0/22',\n '195.211.130.0/23',\n '195.218.190.0/23',\n '212.108.104.0/22',\n '217.12.240.0/20',\n '217.20.144.0/20',\n '217.69.128.0/20',\n '193.186.34.0/24',\n '193.219.127.0/24',\n '193.227.134.0/24',\n '194.31.232.0/24',\n '194.176.118.0/24',\n '194.186.63.0/24',\n '195.114.104.0/24',\n '195.114.105.0/24',\n '195.211.20.0/24',\n '195.211.21.0/24',\n '195.211.128.0/24',)\n\nURL_LIST = (\n 'http://lg.sibir-ix.ru/slave/ipv4/routelist',\n 'http://red-ix.ru/tools/nets.txt',\n)\n\nOUTPUT_PATH = '/var/ftp/maintenance/prefix-update/acl-115'\n\nHEADER = '''\n; This file 
was generated automatically at {date_time}\n;\n; File contains elements for the extended ACL {acl_name} which were downloaded from:\n; {url_list}\n;\n; Bug-report: kostya.vinogradov@gmail.com\n\nno ip access-list extended {acl_name}\nip access-list extended {acl_name}\n'''.format(date_time=datetime.now().strftime('%c'), acl_name=ACL_NAME, url_list='\\n; '.join(URL_LIST))\n\nNETWORK_TEMPLATE = '''\n permit ip {net1} {net2}\n permit ip {net2} {net1}\n'''\n\nWILDCARD_MASKS = {\n 1: '127.255.255.255',\n 2: '63.255.255.255',\n 3: '31.255.255.255',\n 4: '15.255.255.255',\n 5: '7.255.255.255',\n 6: '3.255.255.255',\n 7: '1.255.255.255',\n 8: '0.255.255.255',\n 9: '0.127.255.255',\n 10: '0.63.255.255',\n 11: '0.31.255.255',\n 12: '0.15.255.255',\n 13: '0.7.255.255',\n 14: '0.3.255.255',\n 15: '0.1.255.255',\n 16: '0.0.255.255',\n 17: '0.0.127.255',\n 18: '0.0.63.255',\n 19: '0.0.31.255',\n 20: '0.0.15.255',\n 21: '0.0.7.255',\n 22: '0.0.3.255',\n 23: '0.0.1.255',\n 24: '0.0.0.255',\n 25: '0.0.0.127',\n 26: '0.0.0.63',\n 27: '0.0.0.31',\n 28: '0.0.0.15',\n 29: '0.0.0.7',\n 30: '0.0.0.3',\n 31: '0.0.0.1',\n 32: '0.0.0.0',\n}\n\n\ndef log(message):\n print message\n\n\ndef load_prefixes(url):\n result = []\n r = urllib2.Request(url)\n log('Loading prefix-list from ' + url + '.')\n try:\n f = urllib2.urlopen(r).read()\n except urllib2.HTTPError as e:\n log('Prefix-list download error - {0}'.format(e.__str__()))\n return result\n regexp = re.compile('\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}/\\d{1,3}')\n for line in f.split('\\n'):\n result += regexp.findall(line)\n return result\n\n\nloaded = []\nfor url in URL_LIST:\n loaded = loaded + load_prefixes(url)\n\nloaded += list(ADDITIONAL_NETWORKS) + list(CDN_NETWORKS)\n\nlog('{0} prefixes were loaded'.format(len(loaded)))\nlog('Remove collisions')\nloaded.sort(key=lambda nl: Network(nl).size(), reverse=True)\nnetworks = []\nown_networks = [Network(n) for n in OWN_NETWORKS]\nfor prefix in loaded:\n network = Network(prefix)\n need_add = True\n for network2 in own_networks:\n if network.check_collision(network2):\n need_add = False\n break\n if not need_add:\n continue\n for network2 in networks:\n if network.check_collision(network2):\n need_add = False\n if network.size() > network2.size():\n networks[networks.index(network2)] = network\n break\n if need_add:\n networks.append(network)\n\nlog('Total {0} subnets'.format(len(networks)))\n\nlog('Create ACL config')\n\nacl_config = HEADER + ''.join([NETWORK_TEMPLATE.format(\n net1='{0} {1}'.format(n.network(), WILDCARD_MASKS[n.subnet()]),\n net2='any') for n in networks])\n\nlog('Add own networks')\n\n# own_networks already holds Network objects, so use them directly\nfor network in own_networks:\n for network2 in own_networks:\n acl_config += NETWORK_TEMPLATE.format(\n net1='{0} {1}'.format(network.network(), WILDCARD_MASKS[network.subnet()]),\n net2='{0} {1}'.format(network2.network(), WILDCARD_MASKS[network2.subnet()]))\n\nlog('Write config to {0}'.format(OUTPUT_PATH))\n\nif 'PycharmProject' in os.path.realpath(__file__):\n print acl_config\nelse:\n f = open(OUTPUT_PATH, 'w+')\n f.write(acl_config)\n f.close()" } ]
1
manoadamro/flapi-schema
https://github.com/manoadamro/flapi-schema
3bd2bcf39b308dee0ff96f6bc99cc0a1e81cb7a4
840cfe4bd0ff1e057c3ace9931bd35d8fdaf7808
2cd82d99b1abae8c16d35905b4fde97fdddd2b5a
refs/heads/master
2020-04-16T07:54:31.963040
2019-02-28T18:10:50
2019-02-28T18:10:50
165,403,488
0
0
MIT
2019-01-12T15:31:19
2019-01-13T01:26:25
2019-02-28T18:10:51
Python
[ { "alpha_fraction": 0.6722306609153748, "alphanum_fraction": 0.6779211163520813, "avg_line_length": 34.14666748046875, "blob_id": "f778c0b2f21c6b47c33cf2d7dde084887bb5ebb1", "content_id": "4fbfc77559dfd369d003001a569dd7b605c76e6c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2636, "license_type": "permissive", "max_line_length": 84, "num_lines": 75, "path": "/tests/test_schema_protect.py", "repo_name": "manoadamro/flapi-schema", "src_encoding": "UTF-8", "text": "import unittest\nimport unittest.mock\n\nimport flask\n\nimport flapi_schema.errors\nimport flapi_schema.protect\nimport flapi_schema.types\n\n\nclass FakeSchema(flapi_schema.types.Schema):\n __strict__ = True\n test = flapi_schema.types.Bool()\n\n\ndef route(json_body):\n return json_body\n\n\nclass SchemaProtectTest(unittest.TestCase):\n def setUp(self):\n self.app = flask.Flask(\"TestFlask\")\n\n @unittest.mock.patch.object(\n flask, \"request\", unittest.mock.Mock(json={\"test\": True})\n )\n def test_expects_specific_json(self):\n func = flapi_schema.protect(FakeSchema)(route)\n self.assertEqual(func(), {\"test\": True})\n\n @unittest.mock.patch.object(\n flask, \"request\", unittest.mock.Mock(json={\"nope\": True})\n )\n def test_fails_when_expecting_specific_json(self):\n func = flapi_schema.protect(FakeSchema)(route)\n self.assertRaises(flapi_schema.errors.SchemaValidationError, func)\n\n @unittest.mock.patch.object(\n flask, \"request\", unittest.mock.Mock(json={\"anything\": 123})\n )\n def test_expects_any_json(self):\n func = flapi_schema.protect(True)(route)\n self.assertEqual(func(), {\"anything\": 123})\n\n @unittest.mock.patch.object(flask, \"request\", unittest.mock.Mock(is_json=False))\n def test_fails_when_expecting_any_json(self):\n func = flapi_schema.protect(True)(route)\n self.assertRaises(flapi_schema.errors.SchemaValidationError, func)\n\n @unittest.mock.patch.object(flask, \"request\", unittest.mock.Mock(is_json=False))\n def test_expects_no_json(self):\n func = flapi_schema.protect(False)(route)\n self.assertEqual(func(), None)\n\n @unittest.mock.patch.object(flask, \"request\", unittest.mock.Mock(is_json=True))\n def test_fails_when_expecting_no_json(self):\n func = flapi_schema.protect(False)(route)\n self.assertRaises(flapi_schema.errors.SchemaValidationError, func)\n\n @unittest.mock.patch.object(flask, \"request\", unittest.mock.Mock(is_json=False))\n def test_expects_any_or_no_json_gets_none(self):\n func = flapi_schema.protect(None)(route)\n self.assertEqual(func(), None)\n\n @unittest.mock.patch.object(\n flask, \"request\", unittest.mock.Mock(is_json=True, json={\"yep\": 123})\n )\n def test_expects_any_or_no_json_gets_json(self):\n func = flapi_schema.protect(None)(route)\n self.assertEqual(func(), {\"yep\": 123})\n\n @unittest.mock.patch.object(flask, \"request\", unittest.mock.Mock())\n def test_wrong_type(self):\n func = flapi_schema.protect(123)(route)\n self.assertRaises(flapi_schema.errors.SchemaValidationError, func)\n" }, { "alpha_fraction": 0.6442382335662842, "alphanum_fraction": 0.6481052041053772, "avg_line_length": 37.02941131591797, "blob_id": "a2915c767df96e22d6617b33fa83a4f97687f916", "content_id": "d76f95c95dfc12899629fdabbffedc6ed60d2389", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2586, "license_type": "permissive", "max_line_length": 87, "num_lines": 68, "path": "/tests/types/test_array.py", "repo_name": "manoadamro/flapi-schema", 
"src_encoding": "UTF-8", "text": "import unittest\n\nimport flapi_schema.errors\nimport flapi_schema.types\n\n\nclass BasicSchema(flapi_schema.types.Schema):\n thing = flapi_schema.types.Bool()\n\n\nclass ArrayTest(unittest.TestCase):\n def test_min_only(self):\n prop = flapi_schema.types.Array(flapi_schema.types.Bool, min_length=0)\n self.assertEqual(prop([True, True]), [True, True])\n\n def test_min_only_out_of_range(self):\n prop = flapi_schema.types.Array(flapi_schema.types.Bool, min_length=1)\n self.assertRaises(flapi_schema.errors.SchemaValidationError, prop, [])\n\n def test_max_only(self):\n prop = flapi_schema.types.Array(flapi_schema.types.Bool, max_length=3)\n self.assertEqual(prop([True, True]), [True, True])\n\n def test_max_only_out_of_range(self):\n prop = flapi_schema.types.Array(flapi_schema.types.Bool, max_length=3)\n self.assertRaises(\n flapi_schema.errors.SchemaValidationError, prop, [True, True, True, True]\n )\n\n def test_min_and_max(self):\n prop = flapi_schema.types.Array(\n flapi_schema.types.Bool, min_length=0, max_length=3\n )\n self.assertEqual(prop([True, True]), [True, True])\n\n def test_min_and_max_out_of_range(self):\n prop = flapi_schema.types.Array(\n flapi_schema.types.Bool, min_length=0, max_length=3\n )\n self.assertRaises(\n flapi_schema.errors.SchemaValidationError, prop, [True, True, True, True]\n )\n\n def test_no_range(self):\n prop = flapi_schema.types.Array(flapi_schema.types.Bool)\n self.assertEqual(prop([True, True, True, True]), [True, True, True, True])\n\n def test_array_of_property(self):\n prop = flapi_schema.types.Array(flapi_schema.types.Bool)\n self.assertEqual(prop([True, True]), [True, True])\n\n def test_array_of_property_fails(self):\n prop = flapi_schema.types.Array(flapi_schema.types.Bool)\n self.assertRaises(flapi_schema.errors.SchemaValidationError, prop, [True, \"\"])\n\n def test_wrong_type(self):\n prop = flapi_schema.types.Array(BasicSchema, callback=None)\n self.assertRaises(flapi_schema.errors.SchemaValidationError, prop, 12)\n\n def test_callback(self):\n prop = flapi_schema.types.Array(\n BasicSchema, callback=lambda v: [{\"thing\": True}]\n )\n self.assertEqual(prop([{\"thing\": False}, {\"thing\": False}]), [{\"thing\": True}])\n\n def test_no_callback(self):\n prop = flapi_schema.types.Array(BasicSchema, callback=None)\n self.assertEqual(prop([{\"thing\": False}]), [{\"thing\": False}])\n" }, { "alpha_fraction": 0.6786649227142334, "alphanum_fraction": 0.679973840713501, "avg_line_length": 32.21739196777344, "blob_id": "2476d38a82e36f61770f4a74fb4141c2a775d9d6", "content_id": "8221c3bb87abbbde892a9a50824dbf762bf4ebc4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1528, "license_type": "permissive", "max_line_length": 80, "num_lines": 46, "path": "/tests/types/test_bool.py", "repo_name": "manoadamro/flapi-schema", "src_encoding": "UTF-8", "text": "import unittest\n\nimport flapi_schema.errors\nimport flapi_schema.types\n\n\nclass BoolTest(unittest.TestCase):\n def test_nullable_by_default(self):\n prop = flapi_schema.types.Bool()\n self.assertIsNone(prop(None))\n\n def test_nullable_allows_null(self):\n prop = flapi_schema.types.Bool(nullable=True)\n self.assertIsNone(prop(None))\n\n def test_nullable_raises_error(self):\n prop = flapi_schema.types.Bool(nullable=False)\n self.assertRaises(flapi_schema.errors.SchemaValidationError, prop, None)\n\n def test_default_is_none(self):\n prop = flapi_schema.types.Bool(default=None)\n 
self.assertIsNone(prop(None))\n\n def test_default_value(self):\n prop = flapi_schema.types.Bool(default=True)\n self.assertEqual(prop(None), True)\n\n def test_default_passive_when_value_not_none(self):\n prop = flapi_schema.types.Bool(default=False)\n self.assertEqual(prop(True), True)\n\n def test_default_callable(self):\n prop = flapi_schema.types.Bool(default=lambda: True)\n self.assertEqual(prop(None), True)\n\n def test_wrong_type(self):\n prop = flapi_schema.types.Bool(callback=None)\n self.assertRaises(flapi_schema.errors.SchemaValidationError, prop, 12)\n\n def test_callback(self):\n prop = flapi_schema.types.Bool(callback=lambda v: False)\n self.assertEqual(prop(True), False)\n\n def test_no_callback(self):\n prop = flapi_schema.types.Bool(callback=None)\n self.assertEqual(prop(True), True)\n" }, { "alpha_fraction": 0.7086406946182251, "alphanum_fraction": 0.714995265007019, "avg_line_length": 28.89519691467285, "blob_id": "d1912e5657950e8447f1bae07b580ea28a3518bd", "content_id": "af99f8d0e1af540fec707ded5dba0beb9a829d94", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 13691, "license_type": "permissive", "max_line_length": 163, "num_lines": 458, "path": "/README.md", "repo_name": "manoadamro/flapi-schema", "src_encoding": "UTF-8", "text": "[![CircleCI](https://circleci.com/gh/manoadamro/flapi/tree/master.svg?style=svg)](https://circleci.com/gh/manoadamro/flapi-schema/tree/master)\n[![Coverage Status](https://coveralls.io/repos/github/manoadamro/flapi/badge.svg?branch=master)](https://coveralls.io/github/manoadamro/flapi-schema?branch=master)\n[![CodeFactor](https://www.codefactor.io/repository/github/manoadamro/flapi/badge)](https://www.codefactor.io/repository/github/manoadamro/flapi-schema)\n[![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/ambv/black)\n\n---\n\n# flapi-schema\n\n```python\n\ndef get_minimum_dob():\n return (datetime.datetime.utcnow() - datetime.timedelta(days=365.25 * 16)).date()\n\n\nclass Address(Schema):\n number = Int(min_value=0, nullable=False)\n post_code = Regex(\n re.compile(\"[a-zA-Z]{2}[0-9] ?[0-9][a-zA-Z]{2}\"), nullable=False\n )\n\n\nclass Items(Schema):\n name = String(min_length=3, max_length=50, nullable=False)\n count = Int(min_value=0, default=0)\n\n\nclass Person(Schema):\n __strict__ = True\n name = String(min_length=3, max_length=50, nullable=False)\n address = Object(Address, nullable=False, strict=True)\n friends = Array(Uuid, default=[])\n items = Array(Object(Items, strict=False), default=[])\n date_of_birth = Date(max_value=get_minimum_dob, nullable=False)\n date_of_death = Date(max_value=datetime.date.today, nullable=True)\n\n @custom_property(int, float, nullable=False)\n def something(cls, value):\n return value * 2\n```\n\n## Schema(...)\n\nBase class for schema definitions\n\n```python\n\nclass MySchema(Schema):\n ...\n\n```\n\nNotes:\n\n- Anything (including methods) defined in schema classes will be considered a property.\n If you wish to hide an attribute/method you will need to prefix it with an underscore\n\n- You can define properties using a method without the `@custom_property` decorator.\n This will mean you are passed the value with no checks having been done on it.\n\n- To mark a schema as \"strict\" (meaning extra keys are not accepted),\n add `__strict__ = True` as an attribute\n\n## protect(...)\n\n```python\n@protect(MySchema)\ndef some_method():\n ...\n```\n\n__schema__: Schema, Property or Rule 
(required) The check that has to pass in order for the decorated method to be called. See [flapi_schema.types](#Schema-Types)\n\n---\n\n# Schema Types\n\n## Property(...)\n\nBase class for all schema properties.\n<br>\nCan be used to build custom properties\n\n```python\nclass MyProperty(Property):\n def __init__(self, multiplier, **kwargs):\n # call super and tell Property we only accept ints or floats,\n # pass on any kwargs\n super(MyProperty, self).__init__(int, float, **kwargs)\n self.multiplier = multiplier\n\n def __call__(self, value):\n # do the default checks by calling super(),\n # get back an updated value\n value = super(MyProperty, self).__call__(value)\n\n # do the property thing\n return value * self.multiplier\n\n```\n\n__types__: tuple of types (required) Accepted value types\n\n__nullable__: bool (default True) If false, an error will be raised if a null value is received\n\n__default__: Any (default None) If a null value is received, it will be replaced with this\n\n__callback__: Callable (default None) A method to call once all checks are complete.\nThis method receives the value as its only parameter and returns a modified value\n\n## custom_property(...)\n\nDoes the same as [Property](#Property) but as a decorator!\n\n```python\n@custom_property(int, float, nullable=False)\ndef something(cls, value):\n return value * 2\n\n@custom_property(int, float, default=1)\ndef something_else(cls, value):\n return value * 3\n```\n\n__types__: tuple of types (required) Accepted value types\n\n__nullable__: bool (default True) If false, an error will be raised if a null value is received\n\n__default__: Any (default None) If a null value is received, it will be replaced with this\n\n__callback__: Callable (default None) A method to call once all checks are complete.\nThis method receives the value as its only parameter and returns a modified value\n\n## Object(...)\n\nAllows nesting of schemas\n\n```python\nclass Address(Schema):\n number = Int(min_value=0, nullable=False)\n post_code = Regex(\n re.compile(\"[a-zA-Z]{2}[0-9] ?[0-9][a-zA-Z]{2}\"), nullable=False\n )\n\nclass Person(Schema):\n address = Object(Address, nullable=False, strict=True)\n```\n\n__strict__: bool (default False) Overrides the `__strict__` attribute on the schema definition\n\n__nullable__: bool (default True) If false, an error will be raised if a null value is received\n\n__default__: Any (default None) If a null value is received, it will be replaced with this\n\n__callback__: Callable (default None) A method to call once all checks are complete.\nThis method receives the value as its only parameter and returns a modified value\n\n## Array(...)\n\nDefines an array of items conforming to a Schema/Property definition\n\n```python\nclass MySchema(Schema):\n items = Array(Object(Item, strict=False), default=[])\n```\n\n__schema__: Property or Rule (required) The definition that each item in the array is validated against\n\n__min_length__: int or callable returning int (default None) Minimum allowed array length\n\n__max_length__: int or callable returning int (default None) Maximum allowed array length\n\n__callback__: Callable (default None) A method to call once all checks are complete.\nThis method receives the value as its only parameter and returns a modified value\n\nNotes:\n\n- Value will default to an empty array if None\n\n## Choice(...)\n\nEnsures a value is equal to one from a defined set.\n\n```python\nclass MySchema(Schema):\n choice = Choice([Bool(), Int(), \"1\", \"2\"], nullable=False)\n```\n\n__choices__: list (required) A list containing specific valid values, Property definitions or 
a mix of the two.\n\n__nullable__: bool (default True) If false, an error will be raised if a null value is received\n\n__default__: Any (default None) If a null value is received, it will be replaced with this\n\n__callback__: Callable (default None) A method to call once all checks are complete.\nThis method receives the value as its only parameter and returns a modified value\n\nNotes:\n\n- If a value conforms to more than one choice, it will be validated against the first valid one.\n\n## Number(...)\n\nEnsures a value is either an int or a float\n\n```python\nclass MySchema(Schema):\n number = Number(min_value=0, max_value=10, nullable=False, default=2)\n```\n\n__min_value__: int, float or callable returning int or float (default None) Minimum allowed value\n\n__max_value__: int, float or callable returning int or float (default None) Maximum allowed value\n\n__nullable__: bool (default True) If false, an error will be raised if a null value is received\n\n__default__: Any (default None) If a null value is received, it will be replaced with this\n\n__callback__: Callable (default None) A method to call once all checks are complete.\nThis method receives the value as its only parameter and returns a modified value\n\n## Int(...)\n\nEnsures a value is an integer\n\n```python\nclass MySchema(Schema):\n number = Int(min_value=0, max_value=10, nullable=False, default=2)\n```\n\n__min_value__: int or callable returning int (default None) Minimum allowed value\n\n__max_value__: int or callable returning int (default None) Maximum allowed value\n\n__nullable__: bool (default True) If false, an error will be raised if a null value is received\n\n__default__: Any (default None) If a null value is received, it will be replaced with this\n\n__callback__: Callable (default None) A method to call once all checks are complete.\nThis method receives the value as its only parameter and returns a modified value\n\n## Float(...)\n\nEnsures a value is a float\n\n```python\nclass MySchema(Schema):\n number = Float(min_value=0.0, max_value=6.5, nullable=False, default=2.5)\n```\n\n__min_value__: float or callable returning float (default None) Minimum allowed value\n\n__max_value__: float or callable returning float (default None) Maximum allowed value\n\n__nullable__: bool (default True) If false, an error will be raised if a null value is received\n\n__default__: Any (default None) If a null value is received, it will be replaced with this\n\n__callback__: Callable (default None) A method to call once all checks are complete.\nThis method receives the value as its only parameter and returns a modified value\n\n## Bool(...)\n\nEnsures a value is either true or false\n\n```python\nclass MySchema(Schema):\n boolean = Bool(nullable=False, default=True)\n```\n\n__nullable__: bool (default True) If false, an error will be raised if a null value is received\n\n__default__: Any (default None) If a null value is received, it will be replaced with this\n\n__callback__: Callable (default None) A method to call once all checks are complete.\nThis method receives the value as its only parameter and returns a modified value\n\n## String(...)\n\nEnsures a value is a string\n\n```python\nclass MySchema(Schema):\n thing = String(min_length=2, max_length=5, nullable=False)\n```\n\n__min_length__: int or callable returning int (default None) Minimum allowed string length\n\n__max_length__: int or callable returning int (default None) Maximum allowed string length\n\n__nullable__: bool (default True) If false, an 
error will be raised if a null value is received\n\n__default__: Any (default None) If a null value is received, it will be replaced with this\n\n__callback__: Callable (default None) A method to call once all checks are complete.\nThis method receives the value as its only parameter and returns a modified value\n\n## Regex(...)\n\nEnsures a value matches a regex string\n\n```python\nclass MySchema(Schema):\n thing = Regex(\".+@[^@]+.[^@]{2,}$\", min_length=2, max_length=5, nullable=False)\n```\n\n__matcher__: regex string or compiled pattern (required) Regex to match value against\n\n__min_length__: int or callable returning int (default None) Minimum allowed string length\n\n__max_length__: int or callable returning int (default None) Maximum allowed string length\n\n__nullable__: bool (default True) If false, an error will be raised if a null value is received\n\n__default__: Any (default None) If a null value is received, it will be replaced with this\n\n__callback__: Callable (default None) A method to call once all checks are complete.\nThis method receives the value as its only parameter and returns a modified value\n\n## Email(...)\n\nEnsures a value is a valid email address\n\n```python\nclass MySchema(Schema):\n thing = Email(nullable=False)\n```\n\n__min_length__: int or callable returning int (default None) Minimum allowed string length\n\n__max_length__: int or callable returning int (default None) Maximum allowed string length\n\n__nullable__: bool (default True) If false, an error will be raised if a null value is received\n\n__default__: Any (default None) If a null value is received, it will be replaced with this\n\n__callback__: Callable (default None) A method to call once all checks are complete.\nThis method receives the value as its only parameter and returns a modified value\n\nNotes:\n\n- matcher used: `.+@[^@]+.[^@]{2,}$`\n\n## Uuid(...)\n\nEnsures a value is a valid UUID\n\n```python\nclass MySchema(Schema):\n thing = Uuid(nullable=False, strip_hyphens=True)\n```\n\n__strip_hyphens__: bool (default False) If true, hyphens will be removed from the value string\n\n__nullable__: bool (default True) If false, an error will be raised if a null value is received\n\n__default__: Any (default None) If a null value is received, it will be replaced with this\n\n__callback__: Callable (default None) A method to call once all checks are complete.\nThis method receives the value as its only parameter and returns a modified value\n\nNotes:\n\n- matcher used: `^[a-fA-F0-9]{8}-?[a-fA-F0-9]{4}-?[a-fA-F0-9]{4}-?[a-fA-F0-9]{4}-?[a-fA-F0-9]{12}$`\n\n## Date(...)\n\nEnsures a value is either a valid ISO 8601 date or UTC timestamp and parses it to a date object\n\n```python\nclass MySchema(Schema):\n date = Date(nullable=False, min_value=datetime.date.today)\n```\n\n__min_value__: date or callable returning date (default None) Minimum allowed date\n\n__max_value__: date or callable returning date (default None) Maximum allowed date\n\n__nullable__: bool (default True) If false, an error will be raised if a null value is received\n\n__default__: Any (default None) If a null value is received, it will be replaced with this\n\n__callback__: Callable (default None) A method to call once all checks are complete.\nThis method receives the value as its only parameter and returns a modified value\n\nNotes:\n\n- format used: `%Y-%m-%d`\n\n## Datetime(...)\n\nEnsures a value is a valid ISO 8601 datetime or UTC timestamp and parses it to a datetime object\n\n```python\nclass MySchema(Schema):\n 
time = DateTime(nullable=False, min_value=datetime.datetime.now)\n```\n\n__min_value__: datetime or callable returning datetime (default None) Minimum allowed datetime\n\n__max_value__: datetime or callable returning datetime (default None) Maximum allowed datetime\n\n__nullable__: bool (default True) If false, an error will be raised if a null value is received\n\n__default__: Any (default None) If a null value is received, it will be replaced with this\n\n__callback__: Callable (default None) A method to call once all checks are complete.\nThis method receives the value as its only parameter and returns a modified value\n\nNotes:\n\n- format used: `%Y-%m-%dT%H:%M:%S.%f`\n\n- accepts timezones in `hh:mm` format or `Z`\n\n## AllOf(...)\n\nTODO description\n\n```python\n# TODO example\n```\n\nTODO params\n\n## AnyOf(...)\n\nTODO description\n\n```python\n# TODO example\n```\n\nTODO params\n\n## NoneOf(...)\n\nTODO description\n\n```python\n# TODO example\n```\n\nTODO params\n\n---\n\n## Callback(...)\n\nTODO description\n\n```python\n# TODO example\n```\n\nTODO params\n\n---" } ]
4
IngabireTina/ingabire2
https://github.com/IngabireTina/ingabire2
0836f0cf4c8b6e353c67d054b541efd53642621d
0cc0331344077e2af9e2a030c57071a5a34c2d11
4684a72376f5dbf81415c6e883aba8d4bbfe650e
refs/heads/master
2022-12-01T15:37:46.810792
2020-08-09T14:46:55
2020-08-09T14:46:55
286,255,288
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.804651141166687, "alphanum_fraction": 0.804651141166687, "avg_line_length": 25.75, "blob_id": "a9dcd9fa648061cd5f4875e0826a3869e1e3c437", "content_id": "4e00f90dc05462bafda8bad41a8fb574fe33dbe3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 215, "license_type": "no_license", "max_line_length": 33, "num_lines": 8, "path": "/registerItem/admin.py", "repo_name": "IngabireTina/ingabire2", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom registerItem.models import *\nfrom account.models import User\n\n# Register your models here.\nadmin.site.register(Item)\nadmin.site.register(Stock)\n# admin.site.register(Category)\n\n" }, { "alpha_fraction": 0.7388059496879578, "alphanum_fraction": 0.7388059496879578, "avg_line_length": 25.799999237060547, "blob_id": "2832354a1d3f089259c03bcfc1a17141a458985c", "content_id": "280ae22b87f3600f7a84945989d2feef58e4abec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 134, "license_type": "no_license", "max_line_length": 54, "num_lines": 5, "path": "/account/signals.py", "repo_name": "IngabireTina/ingabire2", "src_encoding": "UTF-8", "text": "# from django.db.models.signals import post_save\n# from.models import User\n#\n# def user_profile(sender, instance, create, **kwargs)\n#\n" }, { "alpha_fraction": 0.6262136101722717, "alphanum_fraction": 0.6262136101722717, "avg_line_length": 21.88888931274414, "blob_id": "80da4af0e16100c3469867fef49b804b13a620f4", "content_id": "3e70ab319f9f0c36c5b0e2aae4bd649f06a47ba5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 206, "license_type": "no_license", "max_line_length": 47, "num_lines": 9, "path": "/account/filters.py", "repo_name": "IngabireTina/ingabire2", "src_encoding": "UTF-8", "text": "# import django_filters\n# from .models import *\n# from registerItem.models import *\n#\n#\n# class SearchFilter(django_filters.filterset):\n# class Meta:\n# model = User\n# fields = '__all__'\n" }, { "alpha_fraction": 0.5702647566795349, "alphanum_fraction": 0.6130346059799194, "avg_line_length": 26.27777862548828, "blob_id": "4203bd052eb114106875ca0ca5aa44adcab84312", "content_id": "7a8faf1aaf0f35fa386e1fe4c6c92835fd71112d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 491, "license_type": "no_license", "max_line_length": 145, "num_lines": 18, "path": "/registerItem/migrations/0003_item_availability.py", "repo_name": "IngabireTina/ingabire2", "src_encoding": "UTF-8", "text": "# Generated by Django 3.0.8 on 2020-08-05 16:59\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('registerItem', '0002_remove_stock_quantity'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='item',\n name='availability',\n field=models.CharField(choices=[('Available', 'Available'), ('Not Available', 'Not Available')], default='Available', max_length=20),\n ),\n ]\n" }, { "alpha_fraction": 0.5231481194496155, "alphanum_fraction": 0.5949074029922485, "avg_line_length": 23, "blob_id": "03b7bf96a9efaa070b6e197dff80a3623e3db850", "content_id": "41c6e7fbcfc1974432e9f78b3f2e32109b430fa4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 432, "license_type": "no_license", "max_line_length": 93, "num_lines": 18, "path": 
"/account/migrations/0004_auto_20200803_1526.py", "repo_name": "IngabireTina/ingabire2", "src_encoding": "UTF-8", "text": "# Generated by Django 3.0.8 on 2020-08-03 13:26\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('account', '0003_auto_20200803_0904'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='user',\n name='profile_pic',\n field=models.ImageField(blank=True, default='tina.jpg', null=True, upload_to=''),\n ),\n ]\n" }, { "alpha_fraction": 0.5552434325218201, "alphanum_fraction": 0.579587996006012, "avg_line_length": 31.363636016845703, "blob_id": "e53cabf6635111e8516477566e9d01235e96c6a6", "content_id": "5208af36bb1c1aedcc731c4b2d644dc242ead9a5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1068, "license_type": "no_license", "max_line_length": 137, "num_lines": 33, "path": "/registerItem/migrations/0005_auto_20200809_1008.py", "repo_name": "IngabireTina/ingabire2", "src_encoding": "UTF-8", "text": "# Generated by Django 3.0.8 on 2020-08-09 08:08\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('registerItem', '0004_item_user'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='item',\n name='availability',\n ),\n migrations.AddField(\n model_name='item',\n name='counting',\n field=models.CharField(choices=[('Available', 'Available'), ('Given', 'Given')], default='Given', max_length=20),\n ),\n migrations.AddField(\n model_name='stock',\n name='availability',\n field=models.CharField(choices=[('Available', 'Available'), ('Given', 'Given')], default='Available', max_length=20),\n ),\n migrations.AlterField(\n model_name='item',\n name='device',\n field=models.OneToOneField(max_length=200, null=True, on_delete=django.db.models.deletion.SET_NULL, to='registerItem.Stock'),\n ),\n ]\n" } ]
6
obaica/Refraction-index
https://github.com/obaica/Refraction-index
39428f76483c8fb2dd2b3d33fd5fe84d55b86d64
4bbb02215f7a7575725f03952834a48a50002a1a
b8e3447935991e72d1832b2d40adf9cbf057b7b1
refs/heads/master
2021-10-11T19:37:18.170024
2019-01-29T03:28:51
2019-01-29T03:28:51
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7957894802093506, "alphanum_fraction": 0.7957894802093506, "avg_line_length": 93.4000015258789, "blob_id": "8ebb5431574b90d1051bd0cf5610647437459558", "content_id": "0f9565b592cae2c5f7a79ce2b5b0607fda17daef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 475, "license_type": "no_license", "max_line_length": 267, "num_lines": 5, "path": "/README.md", "repo_name": "obaica/Refraction-index", "src_encoding": "UTF-8", "text": "**Refraction index**\n\nThis python script is designed to calculate n and k, the components of the complex refractive index, which can be derived from the real and imaginary parts of the dielectric matrix. The dielectric matrix is derived from the optical properties calculation using VASP. \n\nThe output of this script contains dataFrame csv file of the n and k values along in-plane and out-of-plane direction, the plot and smooth plot of the n and k for three strucutres. \n\n\n" }, { "alpha_fraction": 0.5709592700004578, "alphanum_fraction": 0.5908891558647156, "avg_line_length": 35.52799987792969, "blob_id": "d71dbbe81f5227360c8637b7e56e73cbd605df75", "content_id": "36be7dbbbedd5becbe8163b7140ed8e515de2a1f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4566, "license_type": "no_license", "max_line_length": 127, "num_lines": 125, "path": "/n_k.py", "repo_name": "obaica/Refraction-index", "src_encoding": "UTF-8", "text": "import pandas as pd\nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport scipy.signal\n\n\nnp.random.seed(1)\n\n\ndef grep_diele_matrix(imag=True, prefix=\"\"):\n \"\"\"\n :param imag: extract imaginary and real part from VASP output file vasprun.xml\n :param prefix: defined in the main function\n :return: separate imaginary and real part dataFrame csv files.\n \"\"\"\n input_file = open(prefix + \"vasprun.xml\", \"r\")\n outfile = open(prefix + \"E_imag.csv\", \"w\") if imag else open(prefix + \"E_real.csv\", \"w\")\n tag = False\n count = 0\n start_tag = \"<imag>\" if imag else \"<real>\"\n\n column_names = \"Energy,xx,yy,zz,xy,xz,yz\\n\"\n outfile.write(column_names)\n\n for line in input_file:\n if line.strip() == start_tag:\n tag = True\n elif tag:\n if line.strip() == \"</set>\":\n tag = False\n break\n if count >= 10:\n data_list = line.split()[1:-1]\n outfile.write(','.join(data_list) + '\\n')\n count += 1\n\n input_file.close()\n outfile.close()\n\n\ndef calc_nk(prefix=\"\"):\n \"\"\"\n :param prefix: defined in the main function\n :return: refraction index for rhombhedral(xx==yy==zz, xy==xz==yz), hexgonal(xx==yy, zz, and xy==xz==yz==0), cubic structure\n \"\"\"\n image_df = pd.read_csv(prefix + \"E_imag.csv\")\n real_df = pd.read_csv(prefix + \"E_real.csv\")\n\n # calculate epsilon_real & image rhombhedral phase\n epsilon_para_real = real_df.loc[:]['xx'] - real_df.loc[:]['xy']\n epsilon_ver_real = real_df.loc[:]['zz'] + 2 * real_df.loc[:]['xy']\n epsilon_para_image = image_df.loc[:]['xx'] - image_df.loc[:]['xy']\n epsilon_ver_image = image_df.loc[:]['zz'] + 2 * image_df.loc[:]['xy']\n energy_ev = image_df.loc[:]['Energy']\n\n # calculate n and k in both parallel and vertical direction\n n_para = (((epsilon_para_image ** 2 + epsilon_para_real ** 2) ** 0.5 + epsilon_para_real) / 2) ** 0.5\n n_ver = (((epsilon_ver_image ** 2 + epsilon_ver_real ** 2) ** 0.5 + epsilon_ver_real) / 2) ** 0.5\n kappa_para = (((epsilon_para_image ** 2 + epsilon_para_real ** 2) ** 0.5 - epsilon_para_real) / 2) ** 
0.5\n kappa_ver = (((epsilon_ver_image ** 2 + epsilon_ver_real ** 2) ** 0.5 - epsilon_ver_real) / 2) ** 0.5\n wavelength = 1240 * energy_ev ** (-1)\n\n # put the energy, n and k (in both parallel and vertical direction)\n energy_n_k = n_para.to_frame(name='n_para')\n # energy_n_k.insert(0, 'energy', image_df[:]['Energy'])\n energy_n_k.insert(0, 'wavelength', wavelength)\n energy_n_k['n_ver'], energy_n_k['kappa_para'], energy_n_k['kappa_ver'] = n_ver, kappa_para, kappa_ver\n energy_n_k.to_csv(prefix + 'n_k.csv')\n\n\ndef smooth_nk(prefix=\"\"):\n data = pd.read_csv(prefix + 'n_k.csv')\n X = data.loc[:, ['wavelength']].values\n\n y_n_para = data.loc[:, ['n_para']].values.ravel()\n y_n_ver = data.loc[:, ['n_ver']].values.ravel()\n y_k_para = data.loc[:, ['kappa_para']].values.ravel()\n y_k_ver = data.loc[:, ['kappa_ver']].values.ravel()\n\n y_n_para_smooth = scipy.signal.savgol_filter(y_n_para, 51, 3)\n y_n_ver_smooth = scipy.signal.savgol_filter(y_n_ver, 51, 3)\n y_k_para_smooth = scipy.signal.savgol_filter(y_k_para, 51, 3)\n y_k_ver_smooth = scipy.signal.savgol_filter(y_k_ver, 51, 3)\n\n fig, ax = plt.subplots()\n ax.plot(X, y_n_para_smooth, label=\"n_para.\")\n ax.plot(X, y_n_ver_smooth, label=\"n_ver.\")\n ax.plot(X, y_k_para_smooth, label=\"k_para\")\n ax.plot(X, y_k_ver_smooth, label=\"k_ver.\")\n legend = ax.legend(loc='center right', shadow=False, fontsize='x-large')\n legend.get_frame().set_facecolor('None')\n\n plt.xlim((300, 1700))\n plt.title(prefix + \"In2Se3\", fontsize=18)\n plt.xlabel('Wavelength (nm)', fontsize=18)\n plt.xticks(fontsize=16)\n plt.yticks(fontsize=16)\n plt.savefig(prefix)\n plt.show()\n\n\ndef plot_nk(prefix=\"\"):\n n_k = pd.read_csv(prefix + 'n_k.csv', index_col=0)\n n_k = n_k.replace([np.inf, -np.inf], np.nan).dropna(axis=0)\n n_k.plot(kind='line', x='wavelength', y=n_k.columns[1:])\n plt.plot(volume=True)\n plt.xlim((300, 1700))\n plt.title(prefix, fontsize=14)\n plt.xticks(fontsize=12)\n plt.yticks(fontsize=12)\n plt.xlabel('Wavelength (nm)')\n plt.savefig(prefix + \"1\")\n plt.show()\n\n\ndef main():\n for phase in ['beta_', 'alpha_', 'gamma_']:\n grep_diele_matrix(imag=True, prefix=phase)\n grep_diele_matrix(imag=False, prefix=phase)\n calc_nk(prefix=phase)\n plot_nk(prefix=phase)\n smooth_nk(prefix=phase)\n\nif __name__ == \"__main__\":\n main()\n" } ]
2
AlJohri/elections
https://github.com/AlJohri/elections
c4324fc0db20d09e4e83996104f6be5145d9eb32
c912924f47259bab5a120aae0ec1c2b92dc06a6c
65cefc3a6cc0241e017471e425052da8a5782d5c
refs/heads/master
2022-12-11T01:27:10.401788
2018-12-31T03:13:02
2018-12-31T03:13:02
163,555,581
0
0
null
2018-12-30T02:24:26
2018-12-31T03:13:07
2022-12-08T01:31:00
Python
[ { "alpha_fraction": 0.6241655349731445, "alphanum_fraction": 0.6345126628875732, "avg_line_length": 39.486488342285156, "blob_id": "46638c653a4103c2462319cb7792712aed69bf14", "content_id": "7b40485b08364f7292848ce001e7b97fe1f79799", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2996, "license_type": "no_license", "max_line_length": 123, "num_lines": 74, "path": "/thegreenpapers/filter_p_pages.py", "repo_name": "AlJohri/elections", "src_encoding": "UTF-8", "text": "import pandas as pd\n\ndf = pd.read_csv('p_pages.csv')\n\ndf = df[~df.date.str.contains(' ')] # remove all multi-day events (usually the State Convention)\n\ndf = df[~df.event.str.contains('State Convention')]\ndf = df[~df.event.str.contains('Meeting')]\ndf = df[~df.event.str.contains('Assembly')]\ndf = df[~df.event.str.contains('Committee')]\ndf = df[~df.event.str.contains('District Caucus')]\ndf = df[~df.event.str.contains('Ward Caucus')]\ndf = df[~df.event.str.contains('District Conventions: CDs')]\n\ndf = df[~df.state.str.contains('Northern Marianas')]\ndf = df[~df.state.str.contains('American Samoa')]\ndf = df[~df.state.str.contains('Puerto Rico')]\ndf = df[~df.state.str.contains('Guam')]\ndf = df[~df.state.str.contains('Virgin Islands')]\ndf = df[~df.state.str.contains('Unassigned')]\ndf = df[~df.state.str.contains('Democrats Abroad')]\n\npresidential_primaries = []\n\nfor (year, party, state), group in df.groupby(by=['year', 'party', 'state']):\n\n if year == 2016 and party == \"REP\" and state in ['Colorado', 'North Dakota', 'Wyoming']:\n \"\"\"\n 2016\n On the Republican side, American Samoa, Colorado, Guam, North Dakota, the U.S. Virgin Islands\n and Wyoming will not select delegates via caucuses or primaries. In Wyoming, delegates will be\n selected at county and state conventions; no winner has been called yet.\n \"\"\"\n continue\n\n if group.event.count() == 1:\n row = group.iloc[0]\n presidential_primaries.append(row)\n else:\n if state == 'Iowa' and group.event.str.contains('Caucus').sum() == 1:\n row = group[group.event.str.contains('Caucus')].iloc[0]\n presidential_primaries.append(row)\n else:\n print(group)\n raise Exception('must filter down group to the main event')\n # for i, row in group.iterrows():\n # presidential_primaries.append(row)\n\ndf = pd.DataFrame(presidential_primaries)\n\ngrouped_df = df.groupby(by=['year', 'party', 'state']).count()\nmore_than_one_event = grouped_df[grouped_df.date > 1]\n# print(more_than_one_event)\n\ngrouped_df2 = df.groupby(by=['year', 'party']).count()\n# print(grouped_df2)\n\ndf.sort_values(by='date').to_csv('presidential_primaries.csv', index=False)\n\ndef test(group):\n return pd.Series({\n 'DEM': (group[group.party == 'DEM'].event + ' ' + group[group.party == 'DEM'].date).tolist(),\n 'REP': (group[group.party == 'REP'].event + ' ' + group[group.party == 'REP'].date).tolist(),\n })\n\nprint('-----------------------')\n\nwith pd.option_context('display.max_rows', None, 'display.max_columns', None, 'display.width', 1000):\n print(df.groupby(by=['year', 'state', 'date'], as_index=False).apply(test).loc[2016].sort_values(by=['date', 'state']))\n\nprint('-----------------------')\n\nwith pd.option_context('display.max_rows', None, 'display.max_columns', None, 'display.width', 1000):\n print(df.groupby(by=['year', 'state'], as_index=False).apply(test).loc[2016].sort_values(by=['state']))\n" }, { "alpha_fraction": 0.7350060939788818, "alphanum_fraction": 0.7643818855285645, "avg_line_length": 35.31111145019531, 
"blob_id": "f16cbe0efc86cc5565ffde802da8c84ce5f2e147", "content_id": "71a16b9fe363fa5dc83c4313bf8b3fc172d429f1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1634, "license_type": "no_license", "max_line_length": 134, "num_lines": 45, "path": "/thegreenpapers/README.md", "repo_name": "AlJohri/elections", "src_encoding": "UTF-8", "text": "# thegreenpapers\n\nScrape http://www.thegreenpapers.com/ to get a list of all elections from 2002+.\n\n#### Events Page\n\n##### GXX\n\nGoes back to `G02`.\n\n- Alphabetically: http://www.thegreenpapers.com/G14/events.phtml\n- Alphabetically with Filing Deadlines: http://www.thegreenpapers.com/G14/events.phtml?type=ef\n- Chronologically: http://www.thegreenpapers.com/G14/events.phtml?format=chronological\n- Chronologically with Filing Deadlines: http://www.thegreenpapers.com/G14/events.phtml?format=chronological&type=ef\n\n##### PXX\n\nGoes back to `P04`.\n\n- Alphabetically: https://www.thegreenpapers.com/P16/events.phtml?s=a\n- Chronologically: https://www.thegreenpapers.com/P16/events.phtml?s=c\n- Major Events Alphabetically: https://www.thegreenpapers.com/P16/events.phtml?s=a&f=m\n- Major Events Chronologically: https://www.thegreenpapers.com/P16/events.phtml?s=c&f=m\n- \"First Determining Step\" Alphabetically: https://www.thegreenpapers.com/P16/events.phtml?s=a&f=1\n- \"First Determining Step\" Chronologically: https://www.thegreenpapers.com/P04/events.phtml?s=c&f=1\n\n#### PXX \"Primaries at a Glance\" Page\n\n- https://www.thegreenpapers.com/P04/paag.phtml\n- https://www.thegreenpapers.com/P08/paag.phtml\n- https://www.thegreenpapers.com/P16/paag.phtml\n\n#### Download Page\n\nFolder newer elections (2012+), thegreenpapers provides downloadable spreadsheets with events, politicians, delegate counts, and more.\n\n##### GXX\n\n- https://www.thegreenpapers.com/G12/download.phtml\n- https://www.thegreenpapers.com/G18/download.phtml\n\n##### PXX\n\n- https://www.thegreenpapers.com/P12/download.phtml\n- https://www.thegreenpapers.com/P20/download.phtml\n" }, { "alpha_fraction": 0.47152265906333923, "alphanum_fraction": 0.47772181034088135, "avg_line_length": 35.871429443359375, "blob_id": "9b3fdf6560d3d4f69d00e3b00ad11eda324e7307", "content_id": "f970f881da57f47dc26aa1a2869fe68b8d9d643f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2581, "license_type": "no_license", "max_line_length": 114, "num_lines": 70, "path": "/uselectionatlas/scrape.py", "repo_name": "AlJohri/elections", "src_encoding": "UTF-8", "text": "import csv\nimport requests\nimport lxml.html\nimport pendulum\n\ndef inclusive_range(start, end, step=1):\n return range(start, end+step, step)\n\ndef parse_date(month_day, year):\n if ',' in month_day:\n month_day = month_day.split(',')[0]\n date = month_day + ' ' + str(year)\n try:\n return pendulum.from_format(date, 'MMM D YYYY').date().isoformat()\n except Exception:\n return pendulum.from_format(date, 'MMMM D YYYY').date().isoformat()\n\nif __name__ == \"__main__\":\n\n primaries = []\n\n p_type_map = {\n '(C)': 'caucus',\n '(SC)': 'SC??',\n '(CC)': 'CC??'\n }\n\n non_a_tag_states = ['DC (C)', 'ID (C)']\n\n for year in inclusive_range(2000, 2016, 4):\n for party in ['D', 'R']:\n url = f'https://uselectionatlas.org/USPRESIDENT/PRIMARY/MENU_STATETXT/statemenutxt{year}{party}.html'\n response = requests.get(url)\n doc = lxml.html.fromstring(response.text)\n\n current_date = None\n for tr in doc.cssselect('table 
tr')[3:]:\n td = tr.cssselect('td')[0]\n \n if len(td.getchildren()) == 0:\n if not td.text.strip():\n continue\n if td.text not in non_a_tag_states:\n current_date = parse_date(td.text, str(year))\n continue\n \n if td.text in non_a_tag_states:\n state = td.text\n p_type = 'primary'\n if ' ' in state:\n state, p_type = state.split(' ')\n p_type = p_type_map[p_type]\n primary = {'year': year, 'party': party, 'state': state, 'date': current_date, 'type': p_type}\n primaries.append(primary)\n elif td.cssselect('a'):\n if not current_date:\n raise Exception('no date currently set?')\n state = td.cssselect('a')[0].text\n p_type = 'primary'\n if ' ' in state:\n state, p_type = state.split(' ')\n p_type = p_type_map[p_type]\n primary = {'year': year, 'party': party, 'state': state, 'date': current_date, 'type': p_type}\n print(primary)\n primaries.append(primary)\n\n with open('primaries.csv', 'w') as f:\n writer = csv.DictWriter(f, fieldnames=primaries[0].keys())\n writer.writeheader()\n writer.writerows(primaries)\n" }, { "alpha_fraction": 0.5248227119445801, "alphanum_fraction": 0.5376653075218201, "avg_line_length": 38.82442855834961, "blob_id": "10a9a8399699bf5830f02546ac84f459a7fc6400", "content_id": "0a9b9391ec157dd88ddb4a41f6844a630e083753", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5217, "license_type": "no_license", "max_line_length": 116, "num_lines": 131, "path": "/thegreenpapers/scrape.py", "repo_name": "AlJohri/elections", "src_encoding": "UTF-8", "text": "import csv\nimport requests\nimport lxml.html\nimport pendulum\n\ndef inclusive_range(start, end, step=1):\n return range(start, end+step, step)\n\ndef parse_g_alphabetical(html, year):\n rows = []\n doc = lxml.html.fromstring(html)\n for row in doc.cssselect('table tbody tr'):\n\n try:\n state = row.cssselect('td')[0].cssselect('a')[1].text\n except Exception:\n state = row.cssselect('td')[0].cssselect('a')[0].text\n\n for x in row.cssselect('td')[1].cssselect('a'):\n text = x.text\n if not text:\n print(f'[{year} {state}] no events')\n continue\n if text.count(' - ') == 1:\n event, _, date_str = text.rpartition(' - ')\n date = pendulum.from_format(date_str, 'dddd DD MMMM YYYY', tz=None).date().isoformat()\n else:\n event, _, date_str = text.rpartition(' - ')\n date2 = pendulum.from_format(date_str, 'dddd DD MMMM YYYY', tz=None).date().isoformat()\n event, _, date_str = text.rpartition(' - ')\n date1 = pendulum.from_format(date_str, 'dddd DD MMMM YYYY', tz=None).date().isoformat()\n date = f'{date1} {date2}'\n output = {'year': year, 'date': date, 'state': state, 'event': event}\n print(output)\n rows.append(output)\n return rows\n\ndef parse_p_alphabetical(html, year):\n rows = []\n doc = lxml.html.fromstring(html)\n for row in doc.cssselect('table tbody tr'):\n\n try:\n state = row.cssselect('td')[0].cssselect('a')[1].text\n except Exception:\n state = row.cssselect('td')[0].cssselect('a')[0].text\n\n party = 'DEM'\n for x in row.cssselect('td')[1].cssselect('a span'):\n text = x.text\n if not text:\n print(f'[{year} {state}] no events')\n continue\n if text.count(' - ') == 1:\n event, _, date_str = text.rpartition(' - ')\n date = pendulum.from_format(date_str, 'dddd DD MMMM YYYY', tz=None).date().isoformat()\n else:\n event, _, date_str = text.rpartition(' - ')\n date2 = pendulum.from_format(date_str, 'dddd DD MMMM YYYY', tz=None).date().isoformat()\n event, _, date_str = text.rpartition(' - ')\n date1 = pendulum.from_format(date_str, 'dddd DD MMMM YYYY', 
tz=None).date().isoformat()\n date = f'{date1} {date2}'\n output = {'year': year, 'date': date, 'state': state, 'event': event, 'party': party, 'type': 'primary'}\n print(output)\n rows.append(output)\n \n party = 'REP'\n for x in row.cssselect('td')[2].cssselect('a span'):\n text = x.text\n if not text:\n print(f'[{year} {state}] no events')\n continue\n if text.count(' - ') == 1:\n event, _, date_str = text.rpartition(' - ')\n date = pendulum.from_format(date_str, 'dddd DD MMMM YYYY', tz=None).date().isoformat()\n else:\n event, _, date_str = text.rpartition(' - ')\n date2 = pendulum.from_format(date_str, 'dddd DD MMMM YYYY', tz=None).date().isoformat()\n event, _, date_str = text.rpartition(' - ')\n date1 = pendulum.from_format(date_str, 'dddd DD MMMM YYYY', tz=None).date().isoformat()\n date = f'{date1} {date2}'\n output = {'year': year, 'date': date, 'state': state, 'event': event, 'party': party, 'type': 'primary'}\n print(output)\n rows.append(output)\n\n return rows\n\nif __name__ == \"__main__\":\n\n # GXX Pages\n\n g_pages = []\n for year in inclusive_range(2, 18):\n url = f'https://www.thegreenpapers.com/G{year:02}/events.phtml'\n response = requests.get(url)\n g_pages += parse_g_alphabetical(response.text, 2000+year)\n\n with open('g_pages.csv', 'w') as f:\n writer = csv.DictWriter(f, fieldnames=g_pages[0].keys())\n writer.writeheader()\n writer.writerows(g_pages)\n \n # PXX Pages\n\n p_pages = []\n\n for year in inclusive_range(4, 16, 4):\n url = f'https://www.thegreenpapers.com/P{year:02}/events.phtml?s=a&f=m'\n response = requests.get(url)\n p_pages += parse_p_alphabetical(response.text, 2000+year)\n\n with open('p_pages.csv', 'w') as f:\n writer = csv.DictWriter(f, fieldnames=p_pages[0].keys())\n writer.writeheader()\n writer.writerows(p_pages)\n \n # Data Download Pages\n\n for year in inclusive_range(12, 20, 4):\n url = f'https://www.thegreenpapers.com/P{year:02}/download.phtml'\n response = requests.get(url)\n link = lxml.html.fromstring(response.text).cssselect(\"a[target]\")[0].get('href')\n link = link.replace('?dl=0', '?dl=1')\n print(url, link)\n\n for year in inclusive_range(12, 18):\n url = f'https://www.thegreenpapers.com/G{year:02}/download.phtml'\n response = requests.get(url)\n link = lxml.html.fromstring(response.text).cssselect(\"a[target]\")[0].get('href')\n link = link.replace('?dl=0', '?dl=1')\n print(url, link)\n" }, { "alpha_fraction": 0.801886796951294, "alphanum_fraction": 0.8396226167678833, "avg_line_length": 34.33333206176758, "blob_id": "5379bc478aee700b38c4b1483038cecc960131d4", "content_id": "be0c39d03c7795679295616e79a2c7e2fd9d796e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 106, "license_type": "no_license", "max_line_length": 86, "num_lines": 3, "path": "/uselectionatlas/README.md", "repo_name": "AlJohri/elections", "src_encoding": "UTF-8", "text": "# uselectionatlas\n\n- https://uselectionatlas.org/USPRESIDENT/PRIMARY/MENU_STATETXT/statemenutxt2016D.html\n" } ]
5
fjehlik/UCI_heart_disease
https://github.com/fjehlik/UCI_heart_disease
7f095d5f86c08ea8d6779791978ce7c8a235d68d
3f8307e6ba6d5eee4df1d46469c630a5721ddb4a
35d9aeabd5929d4a81e512c68993cb281f56feb5
refs/heads/master
2020-05-18T07:46:32.586294
2020-04-23T14:17:09
2020-04-23T14:17:09
184,274,530
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6136060357093811, "alphanum_fraction": 0.6433970928192139, "avg_line_length": 27.61842155456543, "blob_id": "3aba8a5bd11ed459a3a7bdb41b1a5852d098b603", "content_id": "bddb6b22ee19c298e709ece643872e6c2cc391ef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2249, "license_type": "no_license", "max_line_length": 107, "num_lines": 76, "path": "/heart_xgBoost_v2_gridsearch.py", "repo_name": "fjehlik/UCI_heart_disease", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Apr 17 13:49:54 2019\r\n\r\n@author: fjehlik\r\n\"\"\"\r\n\r\nimport os\r\nos.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\";\r\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"1\";\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport xgboost as xgb\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.metrics import accuracy_score\r\nfrom sklearn.model_selection import GridSearchCV\r\nfrom sklearn.model_selection import train_test_split\r\n\r\n# Select directory to import data\r\nos.chdir(r\"C:\\Users\\fjehlik\\Documents\\01 ML kaggle\\01 heart disease UCI\")\r\n\r\n# Import and read Excel file data and transform to dataframe\r\ndf = pd.DataFrame()\r\nfile = 'heart.csv'\r\ndf = pd.read_csv(file, header=0)\r\n\r\n# Establish features and target\r\nfeat = [\r\n 'age' , \\\r\n 'sex' , \\\r\n 'cp' , \\\r\n 'trestbps' , \\\r\n 'chol' , \\\r\n 'fbs' , \\\r\n 'restecg' , \\\r\n 'thalach' , \\\r\n 'exang' , \\\r\n 'oldpeak' , \\\r\n 'slope' , \\\r\n 'ca' , \\\r\n 'thal']\r\n\r\ntarg = 'target'\r\n\r\n# Establish the features and targets\r\nX = df[feat]\r\ny = df[targ]\r\n\r\n# split data into train and test sets\r\ntest_size = 0.2\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=16)\r\n\r\n# Establish XGB classifier and the grid to search\r\nclf = xgb.XGBClassifier()\r\ngrid = {'learning_rate':[0.01, 0.1, 0.15, 1], 'n_estimators': [25, 50, 100, 120, 140], 'max_depth':[1,2,3]}\r\n\r\n# Block out the following code if you want to run the best parameters below\r\nCV_clf = GridSearchCV(estimator=clf, param_grid=grid, cv=5, verbose=2)\r\nCV_clf.fit(X_train, y_train)\r\nprint(CV_clf.best_params_)\r\n\r\n# Pass the best estimators to the clf and refit using best parameters\r\nclf_best = xgb.XGBClassifier(**CV_clf.best_params_)\r\nclf=clf_best.fit(X_train, y_train)\r\nclf.fit(X_train, y_train)\r\n\r\n# Best values are {'learning_rate': 0.15, 'max_depth': 1, 'n_estimators': 50}\r\n#clf = xgb.XGBClassifier(learning_rate = 0.15, max_depth=1, n_estimators=50)\r\n#clf.fit(X_train, y_train)\r\n# make predictions for test data\r\ny_pred = clf.predict(X_test)\r\npredictions = [round(value) for value in y_pred]\r\n# evaluate predictions\r\naccuracy = accuracy_score(y_test, predictions)\r\nprint(\"Accuracy: %.2f%%\" % (accuracy * 100.0))" }, { "alpha_fraction": 0.6328800916671753, "alphanum_fraction": 0.6559538245201111, "avg_line_length": 30.783782958984375, "blob_id": "0cd617ccb4536bb51c368c26a8a399df548f2ca3", "content_id": "d512d861a25ad59b60db1f6656da3c7ea166e6cc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4854, "license_type": "no_license", "max_line_length": 120, "num_lines": 148, "path": "/heart_RandomForest_gridSearch.py", "repo_name": "fjehlik/UCI_heart_disease", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Apr 17 09:48:13 2019\r\n\r\n@author: 
fjehlik\r\n\r\n*****************************REFERENCE*********************************\r\nKaggle heart predictor:\r\nhttps://www.kaggle.com/ronitf/heart-disease-uci\r\n\r\nThis project predicts heart disease from features of historical hospital records. \r\n\r\nThis database contains 76 attributes, but all published experiments refer to using a subset of 14 of them. \r\nIn particular, the Cleveland database is the only one that has been used by ML researchers to this date. \r\nThe \"goal\" field refers to the presence of heart disease in the patient. It is integer valued from 0 (no presence) to 4.\r\n\r\n\r\nThis program utilizes random forest classifier approach to predicting the outcome\r\n\r\n\r\nage: age in years\r\nsex: (1 = male; 0 = female)\r\ncp: chest pain type\r\ntrestbps: resting blood pressure (in mm Hg on admission to the hospital)\r\nchol: serum cholestoral in mg/dl\r\nfbs: (fasting blood sugar > 120 mg/dl) (1 = true; 0 = false)\r\nrestecg: resting electrocardiographic results\r\nthalach: maximum heart rate achieved\r\nexang: exercise induced angina (1 = yes; 0 = no)\r\noldpeak: ST depression induced by exercise relative to rest\r\nslope: the slope of the peak exercise ST segment\r\nca: number of major vessels (0-3) colored by flourosopy\r\nthal: 3 = normal; 6 = fixed defect; 7 = reversable defect\r\ntarget: 1 or 0\r\n\r\n***********************************************************************\r\n\r\n\"\"\"\r\n\r\nimport os\r\nos.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\";\r\n# The GPU id to use, usually either \"0\" or \"1\";\r\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"1\";\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.model_selection import GridSearchCV\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.model_selection import train_test_split\r\n\r\n# Select directory to import data\r\nos.chdir(r\"C:\\Users\\fjehlik\\Documents\\01 ML kaggle\\01 heart disease UCI\")\r\n\r\n# Import and read Excel file data and transform to dataframe\r\ndf = pd.DataFrame()\r\nfile = 'heart.csv'\r\ndf = pd.read_csv(file, header=0)\r\n\r\n# Establish X and y\r\nfeat = [\r\n 'age' , \\\r\n 'sex' , \\\r\n 'cp' , \\\r\n 'trestbps' , \\\r\n 'chol' , \\\r\n 'fbs' , \\\r\n 'restecg' , \\\r\n 'thalach' , \\\r\n 'exang' , \\\r\n 'oldpeak' , \\\r\n 'slope' , \\\r\n 'ca' , \\\r\n 'thal']\r\n\r\ntarg = 'target'\r\n\r\nX = df[feat]\r\ny = df[targ]\r\n\r\n# Establisg the training and testing datatests\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20, random_state = 72)\r\n\r\n# Show the results of the split\r\nprint(\"Training set has {} samples.\".format(X_train.shape[0]))\r\nprint(\"Testing set has {} samples.\".format(X_test.shape[0]))\r\n\r\n\r\n# Randomforest classifier modeling section\r\nclf = RandomForestClassifier(random_state = 72)\r\nparam_grid = { \r\n 'max_features': ['auto'],\r\n 'n_estimators' : [10,50,100], \r\n 'max_depth' : [1,2,5,8],\r\n 'min_samples_split' : [5,8,10], \r\n 'min_samples_leaf' : [5,10,15],\r\n 'criterion' :['gini', 'entropy']\r\n}\r\n\r\nCV_clf = GridSearchCV(estimator=clf, param_grid=param_grid, cv= 5, verbose=2)\r\nCV_clf.fit(X_train, y_train)\r\nprint(CV_clf.best_params_)\r\n\r\n# Pass the best estimators to the model and refit using best parameters\r\nclf_best = RandomForestClassifier(**CV_clf.best_params_)\r\nmodel=clf_best.fit(X_train, y_train)\r\n\r\n# Use the best trained model to predict the blind test data\r\ny_pred = clf_best.predict(X_test) \r\nscore_train = 
clf_best.score(X_train, y_train)\r\nscore_test = clf_best.score(X_test, y_test)\r\n\r\nprint(\"Score train: \" +str(round(score_train,3)))\r\nprint(\"Score test: \" +str(round(score_test,3)))\r\n\r\n# Extract the feature importances using .feature_importances_ \r\nimportances = clf_best.feature_importances_\r\n\r\n# Display the nine most important X\r\nindices = np.argsort(importances)[::-1]\r\ncolumns = X_train.columns.values[indices[:len(feat)]]\r\nvalues = importances[indices][:len(feat)]\r\n\r\n# Plot Univariate Histograms\r\nX.hist(figsize=(10,12))\r\nplt.show()\r\n\r\n# Plot the weight impportances\r\nfig = plt.figure(figsize=(10,12))\r\nplt.title(\"Normalized Weights for Most Predictive X\", fontsize = 12)\r\nplt.bar(np.arange(len(feat)), values, width = 0.6, align=\"center\", color = '#00A000', \\\r\n label = \"Feature Weight\")\r\nplt.xticks(np.arange(len(feat)), columns, rotation='vertical')\r\nplt.xlim((-0.5, 13.5))\r\nplt.ylabel(\"Weight\", fontsize = 12)\r\nplt.xlabel(\"Feature\", fontsize = 12)\r\nplt.legend(loc = 'upper right')\r\nplt.show() \r\n\r\n# Plot the predicted vs actual for each point in counting order\r\ncount = np.linspace(0,len(y_test),num=len(y_test))\r\nfig = plt.figure(figsize=(8,5))\r\nplt.plot(count, y_test, 'o')\r\nplt.plot(count, y_pred, 'x')\r\nplt.ylabel(\"Target: 0=safe, 1=failure\", fontsize = 12)\r\nplt.xlabel(\"Patient number\", fontsize = 12)\r\nplt.legend(loc = 'best')\r\nplt.show() \r\n" } ]
2
dreamraw/demopygit
https://github.com/dreamraw/demopygit
3afd725dce6af34eb617f8465011390f76d38c79
41fcc415cece334e9509e0b638858f8eb56320b0
e8976603b9f147bf976d52eba63be456d89535b3
refs/heads/master
2021-01-14T01:18:43.266196
2020-02-23T16:55:46
2020-02-23T16:55:46
242,554,855
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6153846383094788, "alphanum_fraction": 0.6153846383094788, "avg_line_length": 12, "blob_id": "b53961759da2521c995b367d84651781f20dd4dd", "content_id": "2dd7845846667da870d3122ad7cd48b998e87f95", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13, "license_type": "no_license", "max_line_length": 12, "num_lines": 1, "path": "/demo.py", "repo_name": "dreamraw/demopygit", "src_encoding": "UTF-8", "text": "print(\"raw\")\n" } ]
1
nadvornix/visacky
https://github.com/nadvornix/visacky
b463794feecb9c427a1a5c907fcdf2e99c5e9709
6ae92c1e50b06532663e3e3e7ed5040a3ce276e0
b346025e0c6a4654a4322c7b119226f1de9eda9e
refs/heads/master
2020-03-25T18:01:03.568241
2018-08-08T12:15:17
2018-08-08T12:15:17
144,008,945
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6315580010414124, "alphanum_fraction": 0.6597774028778076, "avg_line_length": 28.611764907836914, "blob_id": "0c3c5c117ff320f510185d93c18d675734af1266", "content_id": "6ab49ee63a011936217169672ca87baadf7ead2b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2516, "license_type": "no_license", "max_line_length": 87, "num_lines": 85, "path": "/gen.py", "repo_name": "nadvornix/visacky", "src_encoding": "UTF-8", "text": "import xml.etree.ElementTree as ET\nfrom itertools import zip_longest\n\n# from IPython import embed; embed()\n# ET.register_namespace('', \"http://www.topografix.com/GPX/1/1\")\nET.register_namespace(\"dc\", \"http://purl.org/dc/elements/1.1/\")\nET.register_namespace(\"cc\", \"http://creativecommons.org/ns#\")\nET.register_namespace(\"rdf\", \"http://www.w3.org/1999/02/22-rdf-syntax-ns#\")\nET.register_namespace(\"svg\", \"http://www.w3.org/2000/svg\")\nET.register_namespace(\"\", \"http://www.w3.org/2000/svg\")\nET.register_namespace(\"xlink\", \"http://www.w3.org/1999/xlink\")\nET.register_namespace(\"sodipodi\", \"http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd\")\nET.register_namespace(\"inkscape\", \"http://www.inkscape.org/namespaces/inkscape\")\n\n\nlines = open(\"orgs.csv\").readlines()\nnames = [line.strip() for line in lines]\n\ndef grouper(iterable, n, fillvalue=None):\n \"Collect data into fixed-length chunks or blocks\"\n # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx\"\n args = [iter(iterable)] * n\n return zip_longest(*args, fillvalue=fillvalue)\n\n\ndef split_name(name):\n if not name or not name.strip():\n return \" \", \" \"\n parts = name.split()\n jmeno = \" \".join(parts[:-1])\n if jmeno.strip() == \"\":\n return name, \"\"\n prijmeni = parts[-1]\n return jmeno, prijmeni\n\nfor i, (n1, n2, n3, n4) in enumerate(grouper(names, 4)):\n tree = ET.parse('orgs.svg')\n root = tree.getroot()\n\n # from IPython import embed; embed()\n # sys.exit()\n elems = list(root.iter(\"{http://www.w3.org/2000/svg}flowPara\"))\n \n jmena = [e for e in elems if e.text == \"AAA\"]\n prijmeni = [e for e in elems if e.text == \"BBB\"]\n\n\n name, surname = split_name(n1)\n jmena[0].text = name\n prijmeni[0].text = surname\n\n name, surname = split_name(n2)\n jmena[1].text = name\n prijmeni[1].text = surname\n\n name, surname = split_name(n3)\n jmena[2].text = name\n prijmeni[2].text = surname\n\n name, surname = split_name(n4)\n jmena[3].text = name\n prijmeni[3].text = surname\n\n tree.write(open(\"{}a.svg\".format(i), 'w'), encoding='unicode')\n\n name, surname = split_name(n1)\n jmena[1].text = name\n prijmeni[1].text = surname\n\n name, surname = split_name(n2)\n jmena[0].text = name\n prijmeni[0].text = surname\n\n name, surname = split_name(n3)\n jmena[3].text = name\n prijmeni[3].text = surname\n\n name, surname = split_name(n4)\n jmena[2].text = name\n prijmeni[2].text = surname\n\n tree.write(open(\"{}b.svg\".format(i), 'w'), encoding='unicode')\n\n# for neighbor in root.iter(\"{http://www.w3.org/2000/svg}tspan\"):\n # print (neighbor.text=\"CCC DDD\")" }, { "alpha_fraction": 0.5891891717910767, "alphanum_fraction": 0.6270270347595215, "avg_line_length": 13.307692527770996, "blob_id": "d90f7c04cef75f00d4b3ab09f80c36a2c896bf8d", "content_id": "6c4f781440106461add6bde71576f1d48db81e59", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 185, "license_type": "no_license", "max_line_length": 38, "num_lines": 13, "path": "/gen.sh", 
"repo_name": "nadvornix/visacky", "src_encoding": "UTF-8", "text": "#!/bin/bash\nrm [0-9]*\n\npython3 gen.py\n\nfor f in [0-9]**.svg\ndo\n\techo $f\n\tinkscape $f --export-pdf=$f.pdf\n\t# rsvg-convert -f pdf -o $f.pdf $f\ndone\n\npdftk [0-9]*.pdf cat output output.pdf" } ]
2
Kyeongjun/command_hello
https://github.com/Kyeongjun/command_hello
770c1f9008e2e3432f7103995684c73613a2fe93
4ba49d5d941cca4b458608e857fcd4650cebf935
fa2cb9878d4e126d1e1531b622e8deff835a68df
refs/heads/master
2020-06-20T11:25:03.889977
2019-07-16T05:18:32
2019-07-16T05:18:32
197,107,722
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7301587462425232, "alphanum_fraction": 0.7301587462425232, "avg_line_length": 20, "blob_id": "5390120edf2354c05654667c8f86ced4299984db", "content_id": "8f4d88017424e9aa28e02e2248e902b44725ea3b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 126, "license_type": "no_license", "max_line_length": 32, "num_lines": 6, "path": "/hello.py", "repo_name": "Kyeongjun/command_hello", "src_encoding": "UTF-8", "text": "// For command line git tutorial\n// GitHub modification\n\nprint(\"Hello world\")\nprint(\"Tell Your World\")\nprint(\"Tell my world\")\n" } ]
1
markx3/MiniMiner
https://github.com/markx3/MiniMiner
ba91799635f37d352390414c6b9d20538c9be45f
f90ab3fd580fa2f71ae2a5ec87793ab28d1a7bd9
fbda139e9589a257d533d04d04432b0b91f9d90f
refs/heads/master
2020-03-18T05:23:11.515003
2018-05-24T21:53:28
2018-05-24T21:53:28
134,340,052
5
1
null
null
null
null
null
[ { "alpha_fraction": 0.5664864778518677, "alphanum_fraction": 0.5729729533195496, "avg_line_length": 31.647058486938477, "blob_id": "d0b9232017c8079f0dc0f245a1c08ea0fa8fa3b0", "content_id": "43bade5fd44202c90ebd854157227b53a9d19918", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2775, "license_type": "no_license", "max_line_length": 79, "num_lines": 85, "path": "/miniminer.py", "repo_name": "markx3/MiniMiner", "src_encoding": "UTF-8", "text": "\"\"\"Miniminer.\"\"\"\nimport json\nfrom hashlib import sha256\nimport requests\nimport math\nfrom constants import token\n\n\nclass MiniMinerAPI():\n \"\"\"Miniminer API.\n\n Defines get and post methods to get problem and send solution to endpoint.\n \"\"\"\n\n base_url = 'https://hackattic.com/challenges/mini_miner/'\n problem_url = 'problem?access_token='\n solution_url = 'solve?access_token='\n\n def __init__(self, token):\n \"\"\"Initialize API with provided token.\"\"\"\n self.token = token\n\n def get(self, url):\n \"\"\"Return problem from endpoint.\"\"\"\n response = requests.get(url + self.token).json()\n return response\n\n def post(self, nonce, url):\n \"\"\"Send solution to endpoint and print results.\"\"\"\n payload = {'nonce': nonce}\n response = requests.post(url + self.token + '&playground=1',\n json=payload)\n print(response.status_code, response.json())\n return response.json()\n\n\nclass MiniMiner():\n \"\"\"Solves hackattic's MiniMiner problem in playground mode.\n\n A JSON is received from the endpoint with three attributes:\n 'block', 'nonce' and 'difficulty'. MiniMiner's goal is to\n find a 'nonce' that causes the SHA256 hash of 'block' to\n start with 'difficulty' 0 bits. That is, if difficulty is\n 4, the hash should start with at least 4 zero bits.\n \"\"\"\n\n def __init__(self, token):\n \"\"\"Initalize MiniMiner with token provided by user.\"\"\"\n self.debug = False\n self.token = token\n\n def run(self, debug=False):\n \"\"\"Miniminer main function. 
Fetch problem, solve and send.\"\"\"\n self.debug = debug\n api = MiniMinerAPI(self.token)\n response = api.get(api.base_url + api.problem_url)\n nonce, digest = self._get_nonce(response['block'],\n response['difficulty'])\n print(nonce, digest)\n return(api.post(nonce, api.base_url + api.solution_url))\n\n def _get_nonce(self, block, diff):\n \"\"\"Bruteforce nonce until expected minimum zero bits is found.\n\n Returns nonce and digest.\n \"\"\"\n nonce = -1\n expected = '0' * math.ceil(diff/4)\n while 1:\n nonce += 1\n block['nonce'] = nonce\n block_json = json.dumps(block,\n sort_keys=True,\n separators=(',', ':'))\n digest = sha256(block_json.encode()).hexdigest()\n if self.debug:\n msg = 'diff: %s\\tnonce: %s\\tdigest: %s' % (diff, nonce, digest)\n print(msg)\n if digest.startswith(expected):\n return nonce, digest\n\n\nif __name__ == '__main__':\n mm = MiniMiner(token)\n mm.run(debug=True)\n" }, { "alpha_fraction": 0.6361047625541687, "alphanum_fraction": 0.6423690319061279, "avg_line_length": 32.132076263427734, "blob_id": "d9448e0b61b9248d999fd7f016cf4939f1cadef2", "content_id": "1a3fd81953b0fb969be3745b864074a2065d1e72", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1756, "license_type": "no_license", "max_line_length": 74, "num_lines": 53, "path": "/test_miniminer.py", "repo_name": "markx3/MiniMiner", "src_encoding": "UTF-8", "text": "\"\"\"MiniMiner test suite.\"\"\"\nimport unittest\nfrom unittest.mock import patch\nfrom miniminer import MiniMiner, MiniMinerAPI\n\n\nclass MiniMinerTest(unittest.TestCase):\n \"\"\"MiniMiner test class.\n\n Creates a test problem based off hackattic's example. POST and GET\n functions are patched to return expected results. Expected nonce from\n test block is 45.\n \"\"\"\n\n # TODO: Check mock value to change response accordingly\n\n test_problem = {'block': {'data': [], 'nonce': None}, 'difficulty': 8}\n passed = {'result': 'passed (playground mode)'}\n\n def setUp(self):\n \"\"\"Patch requests get and post methods.\"\"\"\n get_patcher = patch('miniminer.requests.get')\n self.mock_get = get_patcher.start()\n self.mock_get.return_value.status_code = 200\n self.mock_get.return_value.json.return_value = self.test_problem\n\n post_patcher = patch('miniminer.requests.post')\n self.mock_post = post_patcher.start()\n self.mock_post.return_value.status_code = 200\n self.mock_post.return_value.json.return_value = self.passed\n\n def test_get(self):\n \"\"\"Get test problem from stub and check it.\"\"\"\n response = MiniMinerAPI('someToken').get('null')\n self.assertEqual(response, self.test_problem)\n\n def test_post(self):\n \"\"\"Post solution to stub and check it.\"\"\"\n passed = {'result': 'passed (playground mode)'}\n response = MiniMinerAPI('someToken').post(45, 'null')\n self.assertEqual(response, passed)\n\n def test_run(self):\n \"\"\"Run MiniMiner's main method.\n\n Check if response is OK.\n \"\"\"\n result = MiniMiner('token').run()\n self.assertEqual(result, self.passed)\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.7339622378349304, "alphanum_fraction": 0.74622642993927, "avg_line_length": 41.400001525878906, "blob_id": "a1be029e26bd2cdb4c6ea2ed0462b3538d7f16bd", "content_id": "65040f779d9d4ca6926b0e6ebcad1da5c1e91bbb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1060, "license_type": "no_license", "max_line_length": 76, "num_lines": 25, "path": "/README.md", "repo_name": 
"markx3/MiniMiner", "src_encoding": "UTF-8", "text": "# Mini Miner\nSolves hackattic's MiniMiner problem in playground mode!\n\nA JSON is received from the endpoint with two attributes:\n`block` and `difficulty`. The `block` attribute contains\n`data`, which houses arbitrary data and `nonce`. MiniMiner's goal is to\nfind a `nonce` value that causes the SHA256 hash of `block` to\nstart with `difficulty` zero bits. That is, if `difficulty` is\n4, the hash should start with at least 4 zero bits.\n\n## How to run\nTo run Mini Miner you should have an access token from hackattic. Go to\n[hackattic](https://hackattic.com/) and sign up! Once you've got your token,\ninput it to `constants.py`.\nIf you're running python 3.3+, all you need is `requests`. Install it with\n`pip install requests`. Else, you should\ninstall python's latest version (and then get `requests` from pip). \nIf you're on a Mac, use `brew install python`.\nOn debian-based distros, use `sudo apt install python3.6`.\n\n### Running tests\nTo run tests, run `python3.6 test_miniminer.py`.\n\n### Running Mini Miner\nTo run Mini Miner, run `python3.6 miniminer.py`.\n" } ]
3
sergeibershadsky/bugger_django
https://github.com/sergeibershadsky/bugger_django
dfabed48bc525dd128399163618425d5fce30d97
279c4b4d85577a69458dae5f8b2e48b7b019acaa
0dd5644211b9edd090627ba4be30667dfa4e57b9
refs/heads/master
2022-12-01T00:48:22.126835
2020-08-20T15:11:40
2020-08-20T15:11:40
288,751,245
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7120622396469116, "alphanum_fraction": 0.7120622396469116, "avg_line_length": 31.125, "blob_id": "c39199cf4f40ba5766e2e8b3fb9c2ab497f0863e", "content_id": "f2601250ef2bbf3dbdf0ed9c61c98de1d01ad47a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 257, "license_type": "no_license", "max_line_length": 72, "num_lines": 8, "path": "/bugger_django/urls.py", "repo_name": "sergeibershadsky/bugger_django", "src_encoding": "UTF-8", "text": "from django.urls import path\n\nfrom articles.views import ArticlesView, refresh_articles_view\n\nurlpatterns = [\n path('posts/', ArticlesView.as_view({'get': 'list'}), name='posts'),\n path('force-refresh/', refresh_articles_view, name='force-refresh')\n]\n" }, { "alpha_fraction": 0.6659729480743408, "alphanum_fraction": 0.6680541038513184, "avg_line_length": 21.880952835083008, "blob_id": "7d35141b9f20c2f41c497e0bed5e9bf3161bd820", "content_id": "7332539e7cac35679d8b327aa5d1b09dd132cd3a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 961, "license_type": "no_license", "max_line_length": 60, "num_lines": 42, "path": "/articles/tasks.py", "repo_name": "sergeibershadsky/bugger_django", "src_encoding": "UTF-8", "text": "from typing import List\n\nimport dramatiq\nimport requests\nfrom lxml import html\nfrom .models import Article\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_articles() -> List[dict]:\n html_str = fetch_page()\n root = html.fromstring(html_str)\n articles = root.xpath(\"//a[@class='storylink']\")\n return [\n dict(title=article.text, url=article.attrib['href'])\n for article in articles\n ]\n\n\ndef fetch_page() -> str:\n url = \"https://news.ycombinator.com/\"\n request = requests.get(url)\n request.raise_for_status()\n return request.text\n\n\ndef refresh_articles():\n articles = get_articles()\n assert len(articles) == 30, \"Hackernews goes wrong\"\n Article.objects.all().delete()\n Article.objects.bulk_create(\n [Article(**article) for article in articles]\n )\n\n\n@dramatiq.actor\ndef refresh_articles_job():\n logger.info(\"Refresh job started\")\n refresh_articles()\n logger.info(\"Job finished\")\n" }, { "alpha_fraction": 0.7974452376365662, "alphanum_fraction": 0.7974452376365662, "avg_line_length": 29.44444465637207, "blob_id": "448f3b48b998037639bcab6dac421ed946793f5b", "content_id": "9213d7132b88fdff978eddfb0aacb2caef9aea46", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 548, "license_type": "no_license", "max_line_length": 50, "num_lines": 18, "path": "/articles/views.py", "repo_name": "sergeibershadsky/bugger_django", "src_encoding": "UTF-8", "text": "from django.http import HttpResponse\nfrom rest_framework import viewsets, filters\nfrom .models import Article\nfrom .serializers import ArticleSerializer\nfrom .pagination import CustomPagination\nfrom .tasks import refresh_articles_job\n\n\nclass ArticlesView(viewsets.ReadOnlyModelViewSet):\n queryset = Article.objects.all()\n serializer_class = ArticleSerializer\n pagination_class = CustomPagination\n filter_backends = [filters.OrderingFilter]\n\n\ndef refresh_articles_view(request):\n refresh_articles_job.send()\n return HttpResponse()\n" }, { "alpha_fraction": 0.6434914469718933, "alphanum_fraction": 0.6502057909965515, "avg_line_length": 27.15243911743164, "blob_id": "744089a53008c8d8678d742b896e164ac07c9be3", "content_id": 
"281c97cce30d69affbc5d7b24303e4262e53040a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4617, "license_type": "no_license", "max_line_length": 91, "num_lines": 164, "path": "/bugger_django/settings.py", "repo_name": "sergeibershadsky/bugger_django", "src_encoding": "UTF-8", "text": "import os\nimport sys\nfrom pathlib import Path\n\n# Build paths inside the project like this: BASE_DIR / 'subdir'.\nimport redis\n\nBASE_DIR = Path(__file__).resolve(strict=True).parent.parent\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.getenv(\"DJANGO_SECRET\")\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = bool(int(os.getenv(\"DEBUG\")))\n\nALLOWED_HOSTS = os.getenv(\"ALLOWED_HOSTS\", '*').split(',')\n\n# Application definition\n\nINSTALLED_APPS = [\n \"django_dramatiq\",\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'rest_framework',\n 'articles.apps.ArticlesConfig',\n \"django_apscheduler\",\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nROOT_URLCONF = 'bugger_django.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'bugger_django.wsgi.application'\n\n# Database\n# https://docs.djangoproject.com/en/3.1/ref/settings/#databases\n\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql',\n 'NAME': os.getenv(\"DATABASE\"),\n 'USER': os.getenv(\"DB_USER\"),\n 'PASSWORD': os.getenv(\"DB_PASSWORD\"),\n 'HOST': os.getenv(\"DB_HOST\"),\n 'PORT': os.getenv(\"DB_PORT\"),\n }\n}\n\n# Password validation\n# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n# Internationalization\n# https://docs.djangoproject.com/en/3.1/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/3.1/howto/static-files/\n\nSTATIC_URL = '/static/'\n\nREST_FRAMEWORK = {\n 'DEFAULT_PERMISSION_CLASSES': [\n 'rest_framework.permissions.AllowAny'\n ],\n 'DEFAULT_RENDERER_CLASSES': [\n 'rest_framework.renderers.JSONRenderer',\n ],\n \"PAGE_SIZE\": 5,\n \"ORDERING_PARAM\": \"order\",\n 
\"DEFAULT_PAGINATION_CLASS\": 'articles.pagination.CustomPagination'\n}\n\nDRAMATIQ_REDIS_URL = os.getenv(\"REDIS_URL\", \"redis://127.0.0.1:6379/0\")\nDRAMATIQ_BROKER = {\n \"BROKER\": \"dramatiq.brokers.redis.RedisBroker\",\n \"OPTIONS\": {\n \"connection_pool\": redis.ConnectionPool.from_url(DRAMATIQ_REDIS_URL),\n },\n \"MIDDLEWARE\": [\n \"dramatiq.middleware.AgeLimit\",\n \"dramatiq.middleware.TimeLimit\",\n \"dramatiq.middleware.Retries\",\n \"django_dramatiq.middleware.AdminMiddleware\",\n \"django_dramatiq.middleware.DbConnectionsMiddleware\",\n ]\n}\n\nTESTING = len(sys.argv) > 1 and sys.argv[1] == 'test'\n\nif TESTING:\n DATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': ':memory:',\n }\n }\n DRAMATIQ_BROKER = {\n \"BROKER\": \"dramatiq.brokers.stub.StubBroker\",\n \"OPTIONS\": {},\n \"MIDDLEWARE\": [\n \"dramatiq.middleware.AgeLimit\",\n \"dramatiq.middleware.TimeLimit\",\n \"dramatiq.middleware.Callbacks\",\n \"dramatiq.middleware.Pipelines\",\n \"dramatiq.middleware.Retries\",\n \"django_dramatiq.middleware.AdminMiddleware\",\n \"django_dramatiq.middleware.DbConnectionsMiddleware\",\n ]\n }\n" }, { "alpha_fraction": 0.5429650545120239, "alphanum_fraction": 0.5712936520576477, "avg_line_length": 15.045454978942871, "blob_id": "1cf85124d42ddfa7f462f54a0e90b45d62df08f0", "content_id": "4d7fc5240307f298f1382179409f97f0dc150be4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "YAML", "length_bytes": 1059, "license_type": "no_license", "max_line_length": 72, "num_lines": 66, "path": "/docker-compose.yml", "repo_name": "sergeibershadsky/bugger_django", "src_encoding": "UTF-8", "text": "version: '3'\n\nservices:\n web:\n build: .\n volumes:\n - .:/code\n depends_on:\n - redis\n - database\n env_file:\n - .env\n expose:\n - 8000\n command: gunicorn bugger_django.wsgi:application --bind 0.0.0.0:8000\n restart: always\n\n redis:\n image: redis\n container_name: cache\n expose:\n - 6379\n restart: always\n\n apscheduler:\n build: .\n command: python manage.py runapscheduler\n depends_on:\n - database\n env_file:\n - .env\n restart: always\n\n dramatiq:\n build: .\n command: python manage.py rundramatiq\n depends_on:\n - redis\n - database\n env_file:\n - .env\n restart: always\n\n database:\n image: \"postgres:latest\"\n container_name: database\n restart: always\n ports:\n - 54320:5432\n environment:\n - POSTGRES_PASSWORD=postgres\n volumes:\n - database_data:/var/lib/postgresql/data\n\n nginx:\n build:\n context: ./nginx\n ports:\n - 80:80\n depends_on:\n - web\n restart: always\n\n\nvolumes:\n database_data:\n" }, { "alpha_fraction": 0.75789475440979, "alphanum_fraction": 0.8105263113975525, "avg_line_length": 20.11111068725586, "blob_id": "0274a2d26f8d76fe6dde48e1f893793bf51d00b9", "content_id": "838ef21b9a549727025f3563c5ed91b82c3ec7f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 190, "license_type": "no_license", "max_line_length": 37, "num_lines": 9, "path": "/.env.example", "repo_name": "sergeibershadsky/bugger_django", "src_encoding": "UTF-8", "text": "REDIS_URL=redis://cache:6379/0\nDB_HOST=database\nDATABASE=postgres\nDB_USER=postgres\nDB_PASSWORD=postgres\nDB_PORT=5432\nDJANGO_SECRET=super-secret\nDEBUG=1\nALLOWED_HOSTS=google.com,amazon.com,*\n" }, { "alpha_fraction": 0.7486631274223328, "alphanum_fraction": 0.7700534462928772, "avg_line_length": 16, "blob_id": "b2d734986682e07220de4a923c69cf97b2588831", "content_id": 
"4d8658176845e1e74172e0cc4c0485287e930735", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 187, "license_type": "no_license", "max_line_length": 57, "num_lines": 11, "path": "/Dockerfile", "repo_name": "sergeibershadsky/bugger_django", "src_encoding": "UTF-8", "text": "FROM python:3.8\n\nENV PYTHONDONTWRITEBYTECODE 1\nENV PYTHONUNBUFFERED 1\n\nWORKDIR /code\n\nCOPY requirements.txt /code\nRUN pip install -U pip && pip install -r requirements.txt\n\nCOPY . /code/\n" }, { "alpha_fraction": 0.7021791934967041, "alphanum_fraction": 0.7021791934967041, "avg_line_length": 12.322580337524414, "blob_id": "ea1fe454169a95f5e12ec4e270f5073460513f67", "content_id": "e8ff47814b621424d4f726d55c7bbb2ac3428555", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 413, "license_type": "no_license", "max_line_length": 48, "num_lines": 31, "path": "/README.md", "repo_name": "sergeibershadsky/bugger_django", "src_encoding": "UTF-8", "text": "# BuggerNews django\n\n### Setup\n\n```bash\ncp .env.example .env\nvim .env\ndocker-compose build\ndocker-compose up -d\ndocker-compose exec web python manage.py migrate\n```\n\n###\n\nTo get fresh articles:\n\n```bash\ncurl -X http://localhost/posts/\n```\n\nor using httpie\n```bash\nhttp http://localhost/posts/\n```\n\nForce refresh articles\n```bash\ncurl -X http://localhost/force-refresh/\nor\nhttp http://localhost/force-refresh/\n```\n" }, { "alpha_fraction": 0.7183098793029785, "alphanum_fraction": 0.7183098793029785, "avg_line_length": 20.299999237060547, "blob_id": "706c16ed98487615f583b0147e6c0cf89dfcef7e", "content_id": "f6d2b0883804f724679bcb1f7845312c6887b888", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 213, "license_type": "no_license", "max_line_length": 62, "num_lines": 10, "path": "/articles/factories.py", "repo_name": "sergeibershadsky/bugger_django", "src_encoding": "UTF-8", "text": "import factory.fuzzy\n\nfrom .models import Article\n\n\nclass ArticleFactory(factory.django.DjangoModelFactory):\n class Meta:\n model = Article\n\n url = factory.Sequence(lambda link: f\"https://{link}.com\")\n" }, { "alpha_fraction": 0.644444465637207, "alphanum_fraction": 0.6558139324188232, "avg_line_length": 33.55356979370117, "blob_id": "9f65f0ab02a816eca9c3556893a641cef1bf9bab", "content_id": "7e19c583b20ab51d268fe82794a991708858a2d5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1935, "license_type": "no_license", "max_line_length": 108, "num_lines": 56, "path": "/articles/tests.py", "repo_name": "sergeibershadsky/bugger_django", "src_encoding": "UTF-8", "text": "from typing import List\n\nfrom django.urls import reverse\nfrom django.utils.crypto import get_random_string\nfrom django_dramatiq.test import DramatiqTestCase\nfrom rest_framework import status\nfrom rest_framework.test import APITestCase\nfrom .factories import ArticleFactory\nfrom .tasks import refresh_articles_job\nfrom .models import Article\nfrom unittest.mock import patch\n\n\ndef fake_articles():\n return [\n {'title': get_random_string(),\n 'url': f'http:{get_random_string()}.su'}\n for _ in range(30)\n ]\n\n\nclass ArticlesTestCase(APITestCase):\n\n def setUp(self):\n for _ in range(30):\n ArticleFactory()\n\n def test_retrieve_articles(self):\n url = reverse('posts')\n response = self.client.get(url)\n self.assertEqual(response.status_code, 
status.HTTP_200_OK)\n data: List[dict] = response.json()\n self.assertEqual(len(data), 5, \"Default response length\")\n self.assertSetEqual(set(data[0].keys()), {'id', 'url', 'created', 'title'}, \"Should contain fields\")\n\n limit = 3\n offset = 1\n response = self.client.get(f'{url}?limit={limit}&offset={offset}')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n data: List[dict] = response.json()\n self.assertEqual(len(data), limit, \"Custom limit\")\n self.assertListEqual([el['id'] for el in data], [2, 3, 4], \"Limited and sorted response\")\n\n\nclass ScraperTestCase(DramatiqTestCase):\n def test_refresh_articles(self):\n url = reverse('force-refresh')\n with patch('articles.tasks.get_articles') as mock_get_articles:\n mock_get_articles.return_value = fake_articles()\n response = self.client.get(url)\n # refresh_articles_job.send()\n self.assertEqual(response.status_code, 200)\n self.broker.join(\"default\")\n self.worker.join()\n\n self.assertEqual(len(Article.objects.all()), 30)\n" } ]
10
aShrewt/Programming
https://github.com/aShrewt/Programming
e020e934ef0ea2f2ace942228f5b1918d3fbc274
ed24d60e29048b404cc4c7de8f43dfdc08121703
81ae60c82b75e83b05c264165164dc68e1ffe006
refs/heads/master
2023-02-10T11:53:40.334333
2021-01-07T18:14:19
2021-01-07T18:14:19
317,298,210
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.45035460591316223, "alphanum_fraction": 0.4822694957256317, "avg_line_length": 11.863636016845703, "blob_id": "d829a73afbc4d35052f8736c4f18b4248334b876", "content_id": "44a422a237a7f02e4b4e3279ac35d1561e99d4bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 282, "license_type": "no_license", "max_line_length": 24, "num_lines": 22, "path": "/new .py", "repo_name": "aShrewt/Programming", "src_encoding": "UTF-8", "text": "import time\n\nstart = time.time()\n\nstring = 'abc xyz'\nstring = string.upper()\n\na = []\n\nfor x in string:\n if 65 <=ord(x) <=90:\n x = ord(x) + 2\n if x > 90:\n x -= 26\n x = chr(x)\n a.append(x)\n\nprint(''.join(a))\n\nend = time.time()\n\nprint(end - start)" }, { "alpha_fraction": 0.6217948794364929, "alphanum_fraction": 0.6410256624221802, "avg_line_length": 24.66666603088379, "blob_id": "d749060ed04d36f718489583b041a42c09cef827", "content_id": "230bab9a19d2e75807e28587d53226d1d98911bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 156, "license_type": "no_license", "max_line_length": 61, "num_lines": 6, "path": "/cool.py", "repo_name": "aShrewt/Programming", "src_encoding": "UTF-8", "text": "\ndef pizza(*toppings):\n print(toppings[0])\n print(toppings[1])\n print(toppings[2])\n\npizza(\"Ham\",\"Pineapple\",\"Onion\",\"Cheese\",\"Bacon\",\"Pepperoni\")\n\n" }, { "alpha_fraction": 0.4850887656211853, "alphanum_fraction": 0.515428364276886, "avg_line_length": 29.69841194152832, "blob_id": "6e6f4d22bcfb96726ff1bd0512b2641dda8b6d6b", "content_id": "bbdf2abf22fc037b3f0ed7e51950a5f2b245ffe0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5801, "license_type": "no_license", "max_line_length": 100, "num_lines": 189, "path": "/Menu.py", "repo_name": "aShrewt/Programming", "src_encoding": "UTF-8", "text": "#Making a menu\n\nimport os\n\n\nclass colours:\n BLUE = '\\033[94m'\n GREEN = '\\033[92m'\n RED = '\\033[31m'\n PINK = '\\033[35m'\n BOLD = '\\033[1m'\n END = '\\033[0m'\n\ndef MeterToYard():\n meter = float(input(\"Input the amount of meters: \"))\n yard = meter*1.093613\n print(meter,\"meters is\",yard,\"yards! \")\n print()\n\ndef YardToMeter():\n yard = float(input(\"Input the amount of yards: \"))\n meter = yard/1.093613\n print(yard,\"yards is\",meter,\"meters! \")\n print()\n\ndef KiloToLbs():\n kilo = float(input(\"Input the amount of kilograms: \"))\n Lbs = kilo*2.20462\n print(kilo,\"kilometers is\",Lbs,\"pounds! \")\n print()\n\ndef LbsToKilo():\n Lbs = float(input(\"Input the amount of pounds: \"))\n Kilo = Lbs/2.20462\n print(Lbs,\"pounds is\",Kilo,\"kilometers! \")\n print()\n\ndef CelsiusToFahrenheit():\n Cel = float(input(\"Input the temperature in celsius: \"))\n F = Cel*(9/5)+32\n print(Cel,\"celsius is\",F,\"fahrenheit! \")\n print()\n\ndef FarenheitToCelsius():\n F = float(input(\"Input the temperature in farenheit: \"))\n Cel = (F-32)*(5/9)\n print(F,\"fahrenheit is\",Cel,\"celsius! 
\")\n print()\n\ndef KMHToMPH():\n Kilometer = float(input(\"Input the amount of kilometers per hour: \"))\n Mile = Kilometer*0.621371\n print(Kilometer,\"kilometers an hour is\",Mile,\"miles an hour!\")\n print()\n\ndef MPHToKMH():\n Mile = float(input(\"Input the amount of miles per hour: \"))\n Kilometer = Mile/0.621371\n print(Mile,\"miles an hour is\",Kilometer,\"kilometers an hour\")\n print()\n\nclear = lambda: os.system('cls')\nuserinput = \"0\"\n\nwhile userinput != \"5\":\n print()\n print(colours.BOLD+\"==MENU==\".center(30)+colours.END)\n print()\n print(colours.BLUE+\"1.\",\"Meters and Yards\".rjust(19,\" \")+colours.END)\n print(colours.GREEN+\"2.\",\"Kilograms and Pounds\".rjust(23,\" \")+colours.END)\n print(colours.RED+\"3.\",\"Celsius and Fahrenheit\".rjust(25,\" \")+colours.END)\n print(colours.PINK+\"4.\",\"Km/H and MPH\".rjust(15,\" \")+colours.END)\n print(colours.BOLD+\"5.\",\"Quit\".rjust(7,\" \")+colours.END)\n print()\n userinput = str(input(\"Select an option (1-5): \"))\n clear()\n\n if userinput == \"1\":\n print(colours.BLUE+\"1. \",\"Convert Meters to Yards\".rjust(25, \" \"))\n print(\"2.\",\"Convert Yards to Meters\".rjust(26,\" \"))\n print(\"3.\",\"Go back to menu\".rjust(18,\" \")+colours.END)\n check = input(\"Select an option (1-3): \")\n\n if check == \"1\":\n MeterToYard()\n print()\n check2 = input(colours.BOLD+\"Would you like to return to the menu? (Y/N): \"+colours.END)\n print()\n if check2 == \"Y\":\n continue\n else:\n break\n\n elif check == \"2\":\n YardToMeter()\n print()\n check2 = input(colours.BOLD+\"Would you like to return to the menu? (Y/N): \"+colours.END)\n print()\n if check2 == \"Y\":\n continue\n elif check2 ==\"N\":\n break\n\n elif check == \"3\":\n continue\n \n elif userinput == \"2\":\n print(colours.GREEN+\"1.\",\"Convert kilograms to pounds\".rjust(30,\" \"))\n print(\"2.\",\"Convert pounds to Kilograms\".rjust(30,\" \"))\n print(\"3.\",\"Go back to menu\".rjust(18,\" \")+colours.END)\n check = input(\"Select an option (1-3): \")\n if check == \"1\":\n KiloToLbs()\n print()\n check2 = input(colours.BOLD+\"Would you like to return to the menu? (Y/N): \"+colours.END)\n print()\n if check2 == \"Y\":\n continue\n else:\n break\n\n elif check == \"2\":\n LbsToKilo()\n print()\n check2 = input(colours.BOLD+\"Would you like to return to the menu? (Y/N): \"+colours.END)\n print()\n if check2 == \"Y\":\n continue\n elif check2 ==\"N\":\n break\n\n elif check == \"3\":\n continue\n\n elif userinput == \"3\":\n print(colours.RED+\"1.\",\"Convert celsius to fahrenheit\".rjust(32,\" \"))\n print(\"2.\",\"Convert fahrenheit to celsius\".rjust(32,\" \"))\n print(\"3.\",\"Go back to menu\".rjust(18,\" \")+colours.END)\n check = input(\"Select an option (1-3): \")\n if check == \"1\":\n CelsiusToFahrenheit()\n print()\n check2 = input(colours.BOLD+\"Would you like to return to the menu? (Y/N): \"+colours.END)\n print()\n if check2 == \"Y\":\n continue\n else:\n break\n\n elif check == \"2\":\n FarenheitToCelsius()\n print()\n check2 = input(colours.BOLD+\"Would you like to return to the menu? 
(Y/N): \"+colours.END)\n print()\n if check2 == \"Y\":\n continue\n elif check2 ==\"N\":\n break\n\n elif check == \"3\":\n continue\n\n elif userinput == \"4\":\n print(colours.PINK+\"1.\",\"Convert KMH to MPH \".rjust(22,\" \"))\n print(\"2.\",\"Convert MPH to KMH\".rjust(21,\" \"))\n print(\"3.\",\"Go back to menu\".rjust(18,\" \")+colours.END)\n check = input(\"Select an option (1-3): \")\n if check == \"1\":\n KMHToMPH()\n print()\n check2 = input(colours.BOLD+\"Would you like to return to the menu? (Y/N): \"+colours.END)\n print()\n if check2 == \"Y\":\n continue\n else:\n break\n\n elif check == \"2\":\n MPHToKMH()\n print()\n check2 = input(colours.BOLD+\"Would you like to return to the menu? (Y/N): \"+colours.END)\n print()\n if check2 == \"Y\":\n continue\n elif check2 ==\"N\":\n break\n\n elif check == \"3\":\n continue" }, { "alpha_fraction": 0.5313653349876404, "alphanum_fraction": 0.5867158770561218, "avg_line_length": 13.315789222717285, "blob_id": "1df3cbad08898a2ac27ab0857b76df620e6d3ef7", "content_id": "b2b7e7391357efc191cf727010ff6718813b95cb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 271, "license_type": "no_license", "max_line_length": 29, "num_lines": 19, "path": "/Change Generator .py", "repo_name": "aShrewt/Programming", "src_encoding": "UTF-8", "text": "import turtle\n\ns = turtle.getscreen()\n\nt = turtle.Turtle()\nt.color(\"green\",\"yellow\")\n\ndef picture():\n t.backward(500)\n for i in range(100):\n t.forward(1000)\n t.left(90)\n t.forward(1)\n t.left(85)\n\n\npicture()\n\nturtle.Screen().exitonclick()" } ]
4
SemonoffArt/coffeebot
https://github.com/SemonoffArt/coffeebot
39e5da829f39ba4f8d6bd7473550577b37babbe1
f94036b4b4aa3b1aa8922176e95fd6fedcdcc9ec
8d71b1d504d70a81c9cef2fd02a469ddfe16e369
refs/heads/master
2021-05-23T19:23:54.602777
2019-11-10T19:11:41
2019-11-10T19:11:41
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.661473274230957, "alphanum_fraction": 0.6694902181625366, "avg_line_length": 33.97883605957031, "blob_id": "f59b3d361b75dcb5723802c767659e743c8fd82a", "content_id": "1d0ed1b2588ffbf98a666911f98b9ba3fb1b0e76", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7170, "license_type": "no_license", "max_line_length": 180, "num_lines": 189, "path": "/cb_db_functions.py", "repo_name": "SemonoffArt/coffeebot", "src_encoding": "UTF-8", "text": "import sqlite3\nimport time\nimport datetime\n\ndef checkUserInQueue(file_db, telegram_id): #Проверка пользователся на нахождение в очереди\n conn = sqlite3.connect(file_db)\n cursor = conn.cursor()\n cursor.execute('SELECT * FROM queue WHERE telegram_id = \"' + str(telegram_id) + '\"')\n data = cursor.fetchone()\n if data == None:\n return False\n else:\n return True\n\n\n#Новый пользователь\ndef newUser(file_db, telegram_id, first_name, last_name):\n conn = sqlite3.connect(file_db)\n cursor = conn.cursor()\n st = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')\n cursor.execute(\"INSERT INTO people (telegram_id, first_name, last_name, TIMESTAMP) VALUES (?,?,?,?)\", (str(telegram_id), first_name, last_name, str(st)))\n conn.commit()\n\n\n#Добавление нового кода регистрации в БД\ndef newCode(file_db, telegram_id, email, code):\n conn = sqlite3.connect(file_db)\n cursor = conn.cursor()\n cursor.execute(\"INSERT INTO codes (telegram_id, email, code) VALUES (?,?,?)\", (str(telegram_id), email, str(code)))\n conn.commit()\n\n\n#Проверка пользователя на наличие регистрации\ndef getCode(file_db, telegram_id):\n conn = sqlite3.connect(file_db)\n cursor = conn.cursor()\n cursor.execute('SELECT * FROM codes WHERE telegram_id = \"' + str(telegram_id) + '\"')\n data = cursor.fetchone()\n return data\n\n\n#Проверка пользователя на наличие регистрации\ndef checkUser(file_db, telegram_id):\n conn = sqlite3.connect(file_db)\n cursor = conn.cursor()\n cursor.execute('SELECT * FROM people WHERE telegram_id = \"' + str(telegram_id) + '\"')\n data = cursor.fetchone()\n if data == None:\n return False\n else:\n return data[2]\n\n\n#Удаление кода\ndef deleteCode(file_db, telegram_id):\n conn = sqlite3.connect(file_db)\n cursor = conn.cursor()\n cursor.execute(\"DELETE FROM codes WHERE telegram_id=\" + str(telegram_id))\n conn.commit()\n return\n\n#Добавление имени во временную таблицу\ndef addTempName(file_db, telegram_id, name):\n conn = sqlite3.connect(file_db)\n cursor = conn.cursor()\n cursor.execute(\"UPDATE codes SET tmp_name=? 
WHERE telegram_id=?\", (str(name), str(telegram_id)))\n conn.commit()\n return\n\n#Добавление имени во временную таблицу\ndef getTempName(file_db, telegram_id):\n conn = sqlite3.connect(file_db)\n cursor = conn.cursor()\n cursor.execute('SELECT * FROM codes WHERE telegram_id = \"' + str(telegram_id) + '\"')\n data = cursor.fetchone()\n if data == None:\n return False\n else:\n return data[5]\n\n#Получение списка городов\ndef getLocationTowns(file_db):\n conn = sqlite3.connect(file_db)\n cursor = conn.cursor()\n cursor.execute('SELECT DISTINCT location_town FROM locations')\n data = cursor.fetchall()\n return data\n\n#Получение списка адресов\ndef getLocationAddress(file_db, town):\n conn = sqlite3.connect(file_db)\n cursor = conn.cursor()\n cursor.execute('SELECT * FROM locations WHERE location_town = \"' + town + '\"')\n data = cursor.fetchall()\n return data\n\n#Получение инфы по локации\ndef getLocationInfo(file_db, location_id):\n conn = sqlite3.connect(file_db)\n cursor = conn.cursor()\n cursor.execute('SELECT * FROM locations WHERE location_id = \"' + location_id + '\"')\n data = cursor.fetchone()\n return data\n\n#Добавление сотрудника в очередь\ndef add_to_queue(file_db, telegram_id, location_id, photo_flag, face_id):\n if not checkUserInQueue(file_db, telegram_id):\n conn = sqlite3.connect(file_db)\n cursor = conn.cursor()\n cursor.execute(\"INSERT INTO queue (telegram_id, location_id, photo_flag, face_id) VALUES (?,?,?,?)\", (str(telegram_id), str(location_id), photo_flag, face_id))\n conn.commit()\n return\n\n#Удаление сотрудника из очереди\ndef delete_user_from_queue(file_db, telegram_id):\n conn = sqlite3.connect(file_db)\n cursor = conn.cursor()\n cursor.execute(str(\"DELETE FROM queue WHERE telegram_id = '{0}'\".format(str(telegram_id))))\n conn.commit()\n return\n\n#Проверка статуса диалога\ndef checkActiveDialog(file_db, telegram_id):\n conn = sqlite3.connect(file_db)\n cursor = conn.cursor()\n cursor.execute('SELECT * FROM couples WHERE (telegram_id_1 = \"' + str(telegram_id) + '\" OR telegram_id_2 = \"' + str(telegram_id) + '\") AND active_dialog=1')\n data = cursor.fetchone()\n return data\n\n#Получение id компаньона\ndef getCompanionId(file_db, telegram_id):\n conn = sqlite3.connect(file_db)\n cursor = conn.cursor()\n cursor.execute('SELECT * FROM couples WHERE (telegram_id_1 = \"' + str(telegram_id) + '\" OR telegram_id_2 = \"' + str(telegram_id) + '\") AND active_dialog=1')\n data = cursor.fetchone()\n if data[2] == str(telegram_id):\n return data[3]\n else:\n return data[2]\n\n#Остановка диалога\ndef disableDialog(file_db, telegram_id):\n conn = sqlite3.connect(file_db)\n cursor = conn.cursor()\n cursor.execute(\"UPDATE couples SET active_dialog=0 WHERE (telegram_id_1=? OR telegram_id_2=?) AND active_dialog=1\", (str(telegram_id), str(telegram_id)))\n conn.commit()\n return\n\n#Получение инфо о сотруднике\ndef getInfoByTelegramId(file_db, telegram_id):\n conn = sqlite3.connect(file_db)\n cursor = conn.cursor()\n cursor.execute('SELECT * FROM people WHERE telegram_id = \"' + str(telegram_id) + '\"')\n data = cursor.fetchone()\n return {\n 'name': data[2],\n 'location_id': data[5]\n }\n\n#Обновление локации у сотрудника\ndef updateLocation(file_db, telegram_id, location_id):\n conn = sqlite3.connect(file_db)\n cursor = conn.cursor()\n cursor.execute(\"UPDATE people SET location_id=? 
WHERE telegram_id=?\", (str(location_id), str(telegram_id)))\n conn.commit()\n return\n\n#Объединение сотрудников в пару\ndef add_to_couple(file_db, telegram_id_1, telegram_id_2, location_id):\n conn = sqlite3.connect(file_db)\n cursor = conn.cursor()\n cursor.execute(\"INSERT INTO couples (telegram_id_1, telegram_id_2, location_id, active_dialog) VALUES (?,?,?,?)\", (str(telegram_id_1), str(telegram_id_2), str(location_id), 1))\n conn.commit()\n return\n\n#Удаление сотрудников из очереди\ndef delete_from_queue(file_db, queue_id_1, queue_id_2):\n conn = sqlite3.connect(file_db)\n cursor = conn.cursor()\n cursor.execute(str(\"DELETE FROM queue WHERE queue_id IN ({0}, {1})\".format(queue_id_1, queue_id_2)))\n conn.commit()\n return\n\n#Получение инфо о сотруднике\ndef getUserInfo(file_db, telegram_id):\n conn = sqlite3.connect(file_db)\n cursor = conn.cursor()\n cursor.execute('SELECT * FROM people WHERE telegram_id = \"' + str(telegram_id) + '\"')\n return cursor.fetchone()\n" }, { "alpha_fraction": 0.684684693813324, "alphanum_fraction": 0.6914414167404175, "avg_line_length": 43.400001525878906, "blob_id": "266448eeceefec9d8451d25a48cc1f3d659ea037", "content_id": "b8a1f070c259995c862c6e7492b180859cbeb26b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 444, "license_type": "no_license", "max_line_length": 150, "num_lines": 10, "path": "/cb_email.py", "repo_name": "SemonoffArt/coffeebot", "src_encoding": "UTF-8", "text": "import smtplib\n\n\ndef send_code (email_smtp_server, email_login, email_pwd, email_to, email_code):\n smtpObj = smtplib.SMTP_SSL(email_smtp_server, 465)\n smtpObj.login(email_login,email_pwd)\n receivers = [email_to]\n message = 'To: {}\\nFrom: {}\\nSubject: {}\\n\\nYour code for CoffeeBot: {}'.format(email_to, email_login, \"Your code for CoffeeBot\", str(email_code))\n smtpObj.sendmail(email_login, email_to, message)\n smtpObj.quit()\n" }, { "alpha_fraction": 0.6123492121696472, "alphanum_fraction": 0.6174606680870056, "avg_line_length": 41.16379165649414, "blob_id": "b0bbbc718e5561e06d6985c3429083fff901ac25", "content_id": "44fca0709e93b6ee7d4e5003953298e85efe7db3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5679, "license_type": "no_license", "max_line_length": 141, "num_lines": 116, "path": "/cb_azure.py", "repo_name": "SemonoffArt/coffeebot", "src_encoding": "UTF-8", "text": "import requests\nimport json\n\ndef getInfoByPhoto(endpoint_url, subscription_key, imagepath):\n # set to your own subscription key value\n assert subscription_key\n\n # replace <My Endpoint String> with the string from your endpoint URL\n face_api_url = endpoint_url + 'detect'\n\n headers = {'Ocp-Apim-Subscription-Key': subscription_key, 'Content-Type': 'application/octet-stream'}\n\n params = {\n 'returnFaceId': 'true',\n 'returnFaceLandmarks': 'false',\n 'returnFaceAttributes': 'age,gender,headPose,smile,facialHair,glasses,emotion,hair,makeup,occlusion,accessories,blur,exposure,noise',\n }\n\n #response = requests.post(face_api_url, params=params,\n # headers=headers, json={\"url\": image_url})\n\n data = open(imagepath, 'rb').read()\n response = requests.post(url=face_api_url,\n data=data,\n params=params,\n headers=headers)\n output = {\"faceId\": response.json()[0]['faceId']}\n\n #print(json.dumps(response.json()))\n# if response.json()[0]['faceAttributes']['gender'] == \"male\":\n# output.update({'gender' : \"Ты мужчина\"})\n# else:\n# output.update({'gender' : \"Ты 
женщина\"})\n\n# output.update({'age' : \"Тебе \" + str(int(response.json()[0]['faceAttributes']['age']))})\n\n A = response.json()[0]['faceAttributes']['emotion']\n dominateEmotion = [\"neutral\", 0.0]\n emotions = {\"anger\": \"злое настроение\",\n \"contempt\": \"чувство презрения\",\n \"disgust\": \"чувство отвращенния\",\n \"fear\": \"чувство страха\",\n \"happiness\": \"хорошее настроение\",\n \"neutral\": \"нейтральное настроение\",\n \"sadness\": \"плохое настроение\",\n \"surprise\": \"удивленное настроение\"\n }\n\n for key in A:\n if A[key] > dominateEmotion[1]:\n dominateEmotion = [key, A[key]]\n\n #output.update({'dominateEmotion' : \"У тебя \" + emotions[dominateEmotion[0]]})\n\n if dominateEmotion[0] == \"anger\":\n if response.json()[0]['faceAttributes']['gender'] == \"male\":\n output.update({'text' : \"Чтобы успокоить нервы, специалисты рекомендуют Зелёный чай!\"})\n else:\n output.update({'text' : \"Чтобы успокоить нервы, специалисты рекомендуют Зелёный чай!\"})\n elif dominateEmotion[0] == \"contempt\":\n if response.json()[0]['faceAttributes']['gender'] == \"male\":\n output.update({'text' : \"С таким выражением лица выпей-ка вот это - Американо!\"})\n else:\n output.update({'text' : \"С таким выражением лица выпей-ка вот это - Эспрессо!\"})\n elif dominateEmotion[0] == \"disgust\":\n if response.json()[0]['faceAttributes']['gender'] == \"male\":\n output.update({'text' : \"Что это за кислое выражение лица, сейчас исправь его вот этим Капучино!\"})\n else:\n output.update({'text' : \"Что это за кислое выражение лица, сейчас исправь его вот этим Раф кофе!\"})\n elif dominateEmotion[0] == \"fear\":\n if response.json()[0]['faceAttributes']['gender'] == \"male\":\n output.update({'text' : \"Не бойся, просто выпей Эспрессо!\"})\n else:\n output.update({'text' : \"Не бойся, просто выпей Американо!\"})\n elif dominateEmotion[0] == \"happiness\":\n if response.json()[0]['faceAttributes']['gender'] == \"male\":\n output.update({'text' : \"Всё и так хорошо, но может быть лучше с Латте!\"})\n else:\n output.update({'text' : \"Всё и так хорошо, но может быть лучше с Латте!\"})\n elif dominateEmotion[0] == \"neutral\":\n if response.json()[0]['faceAttributes']['gender'] == \"male\":\n output.update({'text' : \"Всё понял, тебе нужно Капучино!\"})\n else:\n output.update({'text' : \"Всё понял, тебе Капучино!\"})\n elif dominateEmotion[0] == \"sadness\":\n if response.json()[0]['faceAttributes']['gender'] == \"male\":\n output.update({'text' : \"Не грусти, лучше выпей Горячий Шоколад!\"})\n else:\n output.update({'text' : \"Не грусти, лучше выпей Горячий Шоколад!\"})\n elif dominateEmotion[0] == \"surprise\":\n if response.json()[0]['faceAttributes']['gender'] == \"male\":\n output.update({\"text\":\"Ты такой удивленный! Возможно тебя удивит и кружка Мокко!\"})\n else:\n output.update({\"text\":\"Ты такая удивленная! 
Возможно тебя удивит и кружка Мокко!\"})\n\n    return output\n\n\ndef useFaceApiFindSimilar(urlService,keyService,faceId,faceIds):\n\theaders = {\"Ocp-Apim-Subscription-Key\": keyService, \"Content-Type\": \"application/json\"}\n\n\tpostData = {\"faceId\": faceId,\n\t\t\t\t\"faceIds\":faceIds,\n\t\t\t\t\"mode\": \"matchFace\"}\n\n\tresponse = requests.post(urlService,headers=headers,data=json.dumps(postData))\n\tresult = json.loads(response.text)\n\n\t# keep the candidate with the highest confidence score\n\tbestCandidate = None\n\tbestConfidence = -1.0\n\n\tfor value in result:\n\t\tif value['confidence'] > bestConfidence:\n\t\t\tbestConfidence = value['confidence']\n\t\t\tbestCandidate = value\n\n\treturn bestCandidate\n" }, { "alpha_fraction": 0.5823429822921753, "alphanum_fraction": 0.6058595776557922, "avg_line_length": 36.6875, "blob_id": "eac20127f3e35e36c697566a18c7b5d82e2e6155", "content_id": "1ae2d50a1d5e256920026423ed4dbae0ea0bb612", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1832, "license_type": "no_license", "max_line_length": 199, "num_lines": 48, "path": "/cb_locations.py", "repo_name": "SemonoffArt/coffeebot", "src_encoding": "UTF-8", "text": "import requests\nimport json\nimport urllib3\n\nfrom math import radians, cos, sin, asin, sqrt, ceil\n\ndef distance(lat1, lon1, lat2, lon2):\n    \"\"\"\n    Calculate the great circle distance between two points\n    on the earth (specified in decimal degrees)\n    \"\"\"\n    # convert decimal degrees to radians\n    lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n    # haversine formula\n    dlon = lon2 - lon1\n    dlat = lat2 - lat1\n    a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n    c = 2 * asin(sqrt(a))\n    km = 6367 * c\n    return km\n\n\ndef getLocations(location_longitude, location_latitide, google_api_key, max_view):\n    urllib3.disable_warnings()\n    s = requests.Session()\n    s.keep_alive = True\n    face_api_url = str(\"https://maps.googleapis.com/maps/api/place/nearbysearch/json?location={0},{1}&type=cafe&rankby=distance&key={2}\".format(location_latitide, location_longitude, google_api_key))\n    response = s.get(face_api_url, verify=False).json()\n\n    i = 1\n    output = \"\"\n    for result in response['results']:\n        # distance() expects (lat1, lon1, lat2, lon2), so latitudes go first\n        dist = distance(float(location_latitide), float(location_longitude), float(result['geometry']['location']['lat']), float(result['geometry']['location']['lng']))\n        dist = ceil(dist * 1000)\n        map_link = str(\"https://www.google.com/maps/search/?api=1&query=Google&query_place_id=\" + str(result['place_id']))\n\n        if \"rating\" in result:\n            output = output + result['name'] + \" ⭐️ \" + str(round(result['rating'], 1)) + \" (\"+ str(dist) + \" м.)\\n\"\n        else:\n            output = output + result['name'] + \" (\"+ str(dist) + \" м.)\\n\"\n        output = output + str(\"[Посмотреть на карте](\"+ map_link +\")\") + \"\\n\"\n\n        if i == max_view:\n            break\n        output = output + str(\"\\n\")\n        i = i + 1\n\n    return output\n" }, { "alpha_fraction": 0.48342999815940857, "alphanum_fraction": 0.528997540473938, "avg_line_length": 35.029850006103516, "blob_id": "7f251a5bbf9407b4dd5dcb9428834c55ba6c635d", "content_id": "d4e71373fd404bac5058e1d65ef3700946610c8c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2565, "license_type": "no_license", "max_line_length": 148, "num_lines": 67, "path": "/create_db.py", "repo_name": "SemonoffArt/coffeebot", "src_encoding": "UTF-8", "text": "# Вспомогательный файл для создания таблиц в БД\nimport sqlite3\n\nconn = sqlite3.connect(\"cb.db\") # или :memory: чтобы сохранить в RAM\ncursor = 
conn.cursor()\n\ndef cr_t_people():\n cursor.execute(\"\"\"CREATE TABLE people (\n person_id INTEGER PRIMARY KEY AUTOINCREMENT,\n telegram_id,\n first_name TEXT,\n last_name TEXT,\n location_id TEXT,\n TIMESTAMP TEXT\n )\n \"\"\")\ndef cr_t_locations():\n cursor.execute(\"\"\"CREATE TABLE locations (\n location_id INTEGER PRIMARY KEY AUTOINCREMENT,\n location_town TEXT,\n location_address TEXT,\n location_longitude TEXT,\n location_latitide TEXT\n )\n \"\"\")\n #Добавление локаций\n '''\n locations = [('Москва', 'ул. Электрозаводская, 27, стр. 8', '37.70560749999999', '55.79158206894956'),\n ('Москва', 'ул. Электрозаводская, 27, стр. 9', '37.707026499999984', '55.791349068948946'),\n ('Москва', 'ул. Летниковская, 2, стр. 3', '37.64340799999999', '55.72898606902708')]\n\n cursor.executemany(\"INSERT INTO locations (location_town, location_address, location_longitude, location_latitide) VALUES (?,?,?,?)\", locations)\n conn.commit()\n '''\n\ndef cr_t_queues():\n cursor.execute(\"\"\"CREATE TABLE queue (\n queue_id INTEGER PRIMARY KEY AUTOINCREMENT,\n TIMESTAMP NULL DEFAULT CURRENT_TIMESTAMP,\n telegram_id TEXT,\n location_id TEXT,\n photo_flag INT,\n face_id TEXT\n )\n \"\"\")\n\ndef cr_t_couples():\n cursor.execute(\"\"\"DROP TABLE couples\"\"\")\n cursor.execute(\"\"\"CREATE TABLE couples (\n couple_id INTEGER PRIMARY KEY AUTOINCREMENT,\n TIMESTAMP NULL DEFAULT CURRENT_TIMESTAMP,\n telegram_id_1 TEXT,\n telegram_id_2 TEXT,\n location_id TEXT,\n active_dialog INT\n )\n \"\"\")\ndef cr_t_codes():\n cursor.execute(\"\"\"CREATE TABLE codes (\n code_id INTEGER PRIMARY KEY AUTOINCREMENT,\n TIMESTAMP NULL DEFAULT CURRENT_TIMESTAMP,\n telegram_id TEXT,\n email TEXT,\n code TEXT,\n tmp_name TEXT\n )\n \"\"\")\n" }, { "alpha_fraction": 0.7659574747085571, "alphanum_fraction": 0.771276593208313, "avg_line_length": 35.3870964050293, "blob_id": "f494b83c66960565a5351b95f13c1dc6a0e91c09", "content_id": "ea31db92041f3a35f967786ba927c679a43b844f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1607, "license_type": "no_license", "max_line_length": 117, "num_lines": 31, "path": "/README.md", "repo_name": "SemonoffArt/coffeebot", "src_encoding": "UTF-8", "text": "# CoffeeBot\n\n### Настройка необходимых переменных и директорий\nПредварительно необходимо установить [Python 3](https://www.python.org/getit/).\nСоздайте директорию для проекта и распакуйте в нее файлы из данного репозитория.\nЗаполните переменные в файле cb_telegram.py:\n - telegram_token - токен_телеграм\n - ms_token - токен для Microsoft Azure Cognitive Services\n - email_smtp_server - SMTP сервер\n - email_login - логин для УЗ почтового сервера\n - email_pwd - пароль для УЗ почтового сервера\n\nСоздайте 2 директории для обмена фото:\n - photos\n - sent_images\n### Создание окружения и установка библиотек\nДля создания окружения и установки необходимых библиотек, перейдите в католог с проектом coffebot и выполните команды\n```sh\n$ python3 -m venv cb_env\n$ source cb_env/bin/activate - для Unix\n$ cb_env\\Scripts\\activate.bat - для Windows\n$ pip install python-telegram-bot\n$ pip install urllib3\n$ python cb_telegram.py\n```\n### Запуск\nДля запуска проекта выполните запуск файла cb_telegram.py\n```sh\n$ python cb_telegram.py\n```\n[Инструкция пользователя CoffeeBot](https://github.com/ko90/cofeebot/blob/master/instruction.pdf)\n" }, { "alpha_fraction": 0.5579288005828857, "alphanum_fraction": 0.5668408870697021, "avg_line_length": 55.221988677978516, "blob_id": 
"e476b2f7346ca5ea1f5071990871eba2f27cd138", "content_id": "fa9a7e75c166e1aab36820ee2dfa1f8577c0aa27", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 30283, "license_type": "no_license", "max_line_length": 282, "num_lines": 473, "path": "/cb_telegram.py", "repo_name": "SemonoffArt/coffeebot", "src_encoding": "UTF-8", "text": "import telebot\nimport sqlite3\nimport random\nimport sched, time\nfrom re import *\nfrom telebot import types\nfrom telebot.types import InlineKeyboardMarkup, InlineKeyboardButton\nfrom math import radians, cos, sin, asin, sqrt\nimport threading\nfrom threading import Thread, Event\nfrom timeit import Timer\nimport datetime\nfrom cb_azure import *\nfrom cb_db_functions import *\nfrom cb_email import *\nfrom cb_locations import *\n\n\ntelegram_token = \"\" #токен_телеграм\nms_token = \"\" #токен для microsoft azure cs\nemail_smtp_server = \"\" #SMTP сервер\nemail_login = \"\" #логин для УЗ почтового сервера\nemail_pwd = \"\" #пароль для УЗ почтового сервера\n\n\nbot = telebot.TeleBot(telegram_token)\nfile_db = \"cb.db\"\nconn = sqlite3.connect(file_db)\ncursor = conn.cursor()\nname = ''\nsurname = ''\nemail = ''\ncode = ''\nlocation_id = \"\"\n\n\nclass cbClass():\n global bot\n global ms_token\n global email_smtp_server\n global email_login\n global email_pwd\n def __init__(self):\n pass\n\n def cbFunc(self):\n file_db = \"cb.db\"\n conn = sqlite3.connect(file_db)\n cursor = conn.cursor()\n global name\n global surname\n global email\n global location_id\n\n #Определние дистанции\n def distance(lat1, lon1, lat2, lon2):\n \"\"\"\n Calculate the great circle distance between two points\n on the earth (specified in decimal degrees)\n \"\"\"\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a))\n km = 6367 * c\n return km\n\n #Определение ближайшей локации\n def nearestLocation(lon,lat):\n conn = sqlite3.connect(file_db)\n cursor = conn.cursor()\n cursor.execute('SELECT * FROM locations')\n data = cursor.fetchall()\n nl = {}\n for location in data:\n dist = distance(float(lon), float(lat), float(location[4]), float(location[3]))\n if nl == {}:\n nl = {\n 'location_id': location[0],\n 'location_town': location[1],\n 'location_address': location[2],\n 'distance': dist\n }\n elif nl['distance'] > dist:\n nl = {\n 'location_id': location[0],\n 'location_town': location[1],\n 'location_address': location[2],\n 'distance': dist\n }\n return nl\n\n #Функции для диалога с пользователем\n @bot.message_handler(content_types=['text'])\n def start(message):\n if checkUser(file_db, message.from_user.id):\n if checkUserInQueue(file_db, message.from_user.id):\n markup = InlineKeyboardMarkup()\n markup.row_width = 2\n markup.add(InlineKeyboardButton(\"Я уже не хочу кофе\", callback_data=\"cancel_queue\"))\n bot.send_message(message.from_user.id, checkUser(file_db, message.from_user.id) + \", я помню, что ты хочешь пойти попить кофейку. 
Как только я найду для тебя пару, сразу сообщу.\", reply_markup=markup)\n\n elif message.text == '/stopchat':\n markup = InlineKeyboardMarkup()\n markup.row_width = 2\n markup.add(InlineKeyboardButton(\"Да\", callback_data=\"stopchat_yes\"),\n InlineKeyboardButton(\"Нет\", callback_data=\"stopchat_no\"))\n bot.send_message(message.from_user.id, \"Вы желаете остановить диалог с собеседником?\", reply_markup=markup)\n\n elif message.text == '/showcs':\n InfoByTelegramId = getInfoByTelegramId(file_db, message.from_user.id)\n location_id = InfoByTelegramId['location_id']\n location_info = getLocationInfo(file_db, location_id)\n #Делаем 5 попыток получить ближайшие места, потому что бывает что сервис Google отвечает пустым ответом\n i = 5\n while i > 0:\n nearest_coffee = getLocations(location_info[3],location_info[4],\"AIzaSyBFE4VdqZXTAYEfRRiUIRLss12UBsTfh2U\", 10)\n if nearest_coffee != \"\":\n i = 0\n else:\n i = i - 1\n if nearest_coffee != \"\":\n bot.send_message(message.from_user.id, \"Ближайшие места, где можно попить кофе:\\n\" + nearest_coffee, parse_mode=\"Markdown\", disable_web_page_preview=True)\n\n else:\n bot.send_message(message.from_user.id, \"К сожалению, не удалось ничего найти. Иногда нас подводит сервис Google, попробуй повторить /showcs\")\n\n elif checkActiveDialog(file_db, message.from_user.id):\n #bot.send_message(message.from_user.id, \"У тебя есть активный диалог\")\n text = str(\"Вам пишет {0}: \".format(checkUser(file_db, message.from_user.id)))\n bot.send_message(getCompanionId(file_db, message.from_user.id), text + message.text)\n\n else:\n markup = InlineKeyboardMarkup()\n markup.row_width = 2\n markup.add(InlineKeyboardButton(\"Да\", callback_data=\"cb_yes\"),\n InlineKeyboardButton(\"Пока не готов\", callback_data=\"cb_no\"))\n bot.send_message(message.from_user.id, \"Привет, \" + checkUser(file_db, message.from_user.id) + \"! Пора попить кофейку ☕️ ?\", reply_markup=markup)\n\n elif getCode(file_db, message.from_user.id):\n infoCode = getCode(file_db, message.from_user.id)\n bot.send_message(message.from_user.id, 'На твою почту ' + infoCode[3] + ' был отправлен код подтверждения. Пожалуйста, введи его. Если ты ошибся при вводе e-mail, нажми /setemail')\n bot.register_next_step_handler(message, get_check_pin)\n else:\n if message.text == '/reg':\n bot.send_message(message.from_user.id, \"Регистрация займет пару минут! Чтобы мне удостовериться, что ты из банка Открытие, потребуется подтверждение по E-mail. Введи адрес своей корпоративной почты email@open.ru\")\n bot.register_next_step_handler(message, get_email); #следующий шаг – функция get_email\n else:\n bot.send_message(message.from_user.id, 'Добро пожаловать в CoffeeBot! Этот чатбот поможет найти собеседника на чашку кофе на площадках банка Открытие. Для регистрации нажми /reg')\n\n def get_email(message): #получаем email\n global email\n email = message.text.lower()\n pattern = compile('(^|\\s)[-a-z0-9_.]+@([-a-z0-9]+\\.)+[a-z]{2,6}(\\s|$)')\n is_valid = pattern.match(email)\n if is_valid:\n email_a = email.split('@')\n if email_a[1] == \"open.ru\":\n global code\n code = random.randint(1000,9999)\n newCode(file_db, message.from_user.id, email, code)\n send_code(email_smtp_server, email_login, email_pwd, email, code)\n bot.send_message(message.from_user.id, 'На твою почту ' + email + ' был отправлен код подтверждения. Пожалуйста, введи его. 
Если ты ошибся при вводе e-mail, нажми /setemail')\n bot.register_next_step_handler(message, get_check_pin)\n else:\n bot.send_message(message.from_user.id, \"Введи адрес своей почты email@open.ru\")\n bot.register_next_step_handler(message, get_email); #следующий шаг – функция get_email\n else:\n bot.send_message(message.from_user.id, \"Введи адрес своей почты email@open.ru\")\n bot.register_next_step_handler(message, get_email); #следующий шаг – функция get_email\n\n def get_check_pin(message): #проверяем Pin\n pin = message.text\n infoCode = getCode(file_db, message.from_user.id)\n if infoCode[4] == pin:\n bot.send_message(message.from_user.id, 'Теперь я могу тебе доверять =) Осталось нам с тобой познакомиться и можно будет попить кофейку ☕️. Введи свое имя.')\n bot.register_next_step_handler(message, get_name)\n\n elif message.text == '/setemail':\n deleteCode(file_db, message.from_user.id)\n bot.send_message(message.from_user.id, \"Ок, будь внимательнее! Введи адрес своей корпоративной почты email@open.ru\")\n bot.register_next_step_handler(message, get_email); #следующий шаг – функция get_email\n\n else:\n bot.send_message(message.from_user.id, 'Код неверный. Попробуй еще раз.')\n bot.register_next_step_handler(message, get_check_pin)\n\n\n def get_name(message): #получаем фамилию\n name = message.text\n addTempName(file_db, message.from_user.id, name)\n bot.send_message(message.from_user.id, 'Какая у тебя фамилия?')\n bot.register_next_step_handler(message, get_reg_end)\n\n\n def get_reg_end(message):\n surname = message.text\n name = getTempName(file_db, message.from_user.id)\n newUser(file_db, message.from_user.id, name, surname) #Добавляем пользователя в БД\n deleteCode(file_db, message.from_user.id)\n markup = InlineKeyboardMarkup()\n markup.row_width = 2\n markup.add(InlineKeyboardButton(\"Да\", callback_data=\"cb_yes\"),\n InlineKeyboardButton(\"Пока не готов\", callback_data=\"cb_no\"))\n bot.send_message(message.from_user.id, \"Ура! Вот мы и познакомились =) Пора попить кофе?\", reply_markup=markup)\n\n\n #Обработка нажатия кнопок\n @bot.callback_query_handler(func=lambda call: True)\n def callback_query(call):\n\n call_a = call.data.split('_')\n if call_a[0] == \"cancel\":\n delete_user_from_queue(file_db, call.from_user.id)\n bot.send_message(call.from_user.id, 'Ок, как будешь готов попить кофе, дай мне знать 😉')\n elif call.data == \"stopchat_yes\":\n bot.answer_callback_query(call.id, \"Остановка диалога\")\n if checkActiveDialog(file_db, call.from_user.id):\n bot.send_message(getCompanionId(file_db, call.from_user.id), 'Диалог остановлен. Надеюсь все прошло хорошо, возвращайся снова! Если захочешь попить кофейку, напиши любое слово')\n bot.send_message(call.from_user.id, 'Диалог остановлен. Надеюсь все прошло хорошо, возвращайся снова! 
Если захочешь попить кофейку, напиши любое слово')\n                    disableDialog(file_db, call.from_user.id)\n                    bot.clear_step_handler_by_chat_id(chat_id=call.message.chat.id)\n                else:\n                    bot.send_message(call.from_user.id, 'В данный момент нет активных диалогов')\n            elif call.data == \"stopchat_no\":\n                bot.answer_callback_query(call.id, \"Остановка диалога\")\n                if checkActiveDialog(file_db, call.from_user.id):\n                    bot.send_message(call.from_user.id, 'Вы можете продолжить беседу')\n                    bot.clear_step_handler_by_chat_id(chat_id=call.message.chat.id)\n                else:\n                    bot.send_message(call.from_user.id, 'В данный момент нет активных диалогов')\n\n            if not checkUserInQueue(file_db, call.from_user.id) and not checkActiveDialog(file_db, call.from_user.id): #Не реагируем на кнопки, если есть активный диалог или ожидание в очереди\n                if call_a[0] == \"town\":\n                    address = getLocationAddress(file_db, call_a[1])\n                    markup = InlineKeyboardMarkup()\n                    for row in address:\n                        markup.add(InlineKeyboardButton(row[2], callback_data=\"gotoqueue_\" + str(row[0])))\n                    bot.send_message(call.from_user.id, \"Выбери адрес или поделись геопозицией и я сам пойму где ты находишься 😏\", reply_markup=markup)\n\n                elif call_a[0] == \"gotoqueue\":\n                    if checkUserInQueue(file_db, call.from_user.id):\n                        output = str(\"Я уже подыскиваю пару для тебя.\")\n                        bot.send_message(call.from_user.id, output)\n                    else:\n                        bot.answer_callback_query(call.id, \"Поиск собеседника\")\n                        InfoByTelegramId = getInfoByTelegramId(file_db, call.from_user.id)\n                        location_id = InfoByTelegramId['location_id']\n                        location_info = getLocationInfo(file_db, location_id)\n                        add_to_queue(file_db, call.from_user.id, location_id, 0, \"\")\n                        output = str(\"Я добавил тебя в список желающих попить кофе по адресу {0}, {1}. Как только я найду пару, сразу сообщу.\".format(location_info[1], location_info[2]))\n                        bot.send_message(call.from_user.id, output)\n\n                    bot.clear_step_handler_by_chat_id(chat_id=call.message.chat.id)\n\n                elif call_a[0] == \"givephoto\":\n                    bot.answer_callback_query(call.id, \"Приложи фото\")\n                    updateLocation(file_db, call.from_user.id, call_a[1])\n                    markup = InlineKeyboardMarkup()\n                    markup.row_width = 2\n                    markup.add(InlineKeyboardButton(\"Я не хочу загружать фото\", callback_data=\"gotoqueue\"))\n                    bot.send_message(call.from_user.id, \"Приложи свое селфи и я порекомендую тебе кофе и найду наиболее подходящего собеседника!\", reply_markup=markup)\n\n\n                elif call.data == \"cb_nolocation\":\n                    bot.send_message(call.from_user.id, \" Поделись геопозицией, чтобы я понял на какой площадке ты находишься 😏\")\n\n\n                elif call.data == \"cb_yes\":\n                    bot.answer_callback_query(call.id, \"Поделись геопозицией\")\n                    userInfo = getInfoByTelegramId(file_db, call.from_user.id)\n                    if userInfo['location_id']:\n                        location_info = getLocationInfo(file_db, userInfo['location_id'] )\n                        location = str(\"Последний раз ты был на локации по адресу: {0}, {1}. 
Сейчас ты здесь?\".format(location_info[1], location_info[2]))\n markup = InlineKeyboardMarkup()\n markup.row_width = 2\n markup.add(InlineKeyboardButton(\"Да\", callback_data=\"givephoto_\" + str(userInfo['location_id'])),\n InlineKeyboardButton(\"Нет\", callback_data=\"cb_nolocation\"))\n bot.send_message(call.from_user.id, location, reply_markup=markup)\n\n else:\n bot.send_message(call.from_user.id, \" Поделись геопозицией, чтобы я понял на какой площадке ты находишься 😏\")\n\n elif call.data == \"cb_no\":\n bot.answer_callback_query(call.id, \"Ок, как будешь готов, пиши 😉\")\n bot.send_message(call.from_user.id, \"Ок, как будешь готов, пиши 😉\")\n\n else:\n if not checkActiveDialog(file_db, call.from_user.id) and call.data != \"stopchat_no\":\n bot.answer_callback_query(call.id, \"Кнопки не доступны\")\n bot.send_message(call.from_user.id, 'В данный момент эти кнопки не доступны')\n\n\n @bot.message_handler(content_types=['location'])\n def handle_docs_location(message):\n nl = nearestLocation(message.location.latitude, message.location.longitude)\n location = str(\"Ты находишься по адресу: {0}, {1}?\".format(nl['location_town'], nl['location_address']))\n\n markup = InlineKeyboardMarkup()\n markup.row_width = 2\n markup.add(InlineKeyboardButton(\"Да\", callback_data=\"givephoto_\" + str(nl['location_id'])),\n InlineKeyboardButton(\"Нет\", callback_data=\"cb_yes\"))\n bot.send_message(message.from_user.id, location, reply_markup=markup)\n\n\n @bot.message_handler(content_types=['photo'])\n def handle_docs_photo(message):\n if not checkUserInQueue(file_db, message.from_user.id) and not checkActiveDialog(file_db, message.from_user.id):\n bot.send_message(message.from_user.id, \"Анализирую твое фото...\")\n try:\n file_info = bot.get_file(message.photo[len(message.photo)-1].file_id)\n downloaded_file = bot.download_file(file_info.file_path)\n\n src=file_info.file_path;\n with open(\"photos/\" + str(message.from_user.id) + \".jpg\", 'wb') as new_file:\n new_file.write(downloaded_file)\n\n azureInfo = getInfoByPhoto(\"https://westcentralus.api.cognitive.microsoft.com/face/v1.0/\", ms_token,\"./photos/\"+ str(message.from_user.id) +\".jpg\")\n bot.reply_to(message, azureInfo['text'])\n\n InfoByTelegramId = getInfoByTelegramId(file_db, message.from_user.id)\n location_id = InfoByTelegramId['location_id']\n if location_id:\n location_info = getLocationInfo(file_db, location_id)\n add_to_queue(file_db, message.from_user.id, location_id, 1, azureInfo['faceId'])\n output = str(\"Я добавил тебя в список желающих попить кофе по адресу {0}, {1}. Как только я найду пару, сразу сообщу.\".format(location_info[1], location_info[2]))\n bot.send_message(message.from_user.id, output)\n bot.clear_step_handler_by_chat_id(chat_id=message.chat.id)\n else:\n output = \"Пока я не понимаю, что мне делать с этим фото. Напиши что-нибудь, если хочешь выпить кофейку =)\"\n bot.send_message(message.from_user.id, output)\n bot.clear_step_handler_by_chat_id(chat_id=message.chat.id)\n\n except Exception as e:\n bot.reply_to(message, \"Не удалось проанализировать фото\" )\n InfoByTelegramId = getInfoByTelegramId(file_db, message.from_user.id)\n location_id = InfoByTelegramId['location_id']\n if location_id:\n location_info = getLocationInfo(file_db, location_id)\n add_to_queue(file_db, message.from_user.id, location_id, 0, \"\")\n output = str(\"Я добавил тебя в список желающих попить кофе по адресу {0}, {1}. 
Как только я найду пару, сразу сообщу.\".format(location_info[1], location_info[2]))\n bot.send_message(message.from_user.id, output)\n bot.clear_step_handler_by_chat_id(chat_id=message.chat.id)\n else:\n output = \"Пока я не понимаю, что мне делать с этим фото. Напиши что-нибудь, если хочешь выпить кофейку =)\"\n bot.send_message(message.from_user.id, output)\n bot.clear_step_handler_by_chat_id(chat_id=message.chat.id)\n\n elif checkActiveDialog(file_db, message.from_user.id): #Отправка изображений собеседнику\n file_info = bot.get_file(message.photo[len(message.photo)-1].file_id)\n downloaded_file = bot.download_file(file_info.file_path)\n src=file_info.file_path;\n with open(\"sent_images/\" + str(message.from_user.id) + \".jpg\", 'wb') as new_file:\n new_file.write(downloaded_file)\n img=open(\"sent_images/\" + str(message.from_user.id) + \".jpg\",'rb')\n bot.send_message(getCompanionId(file_db, message.from_user.id), \"Вам прислали изображение:\")\n bot.send_photo(getCompanionId(file_db, message.from_user.id),img)\n\n else:\n output = \"Пока я не понимаю, что мне делать с этим фото.\"\n bot.send_message(message.from_user.id, output)\n bot.clear_step_handler_by_chat_id(chat_id=message.chat.id)\n\n bot.polling(none_stop=True, interval=0)\n\n\ndef timerFunc():\n def create_appointment(telegram_id_1, telegram_id_2, photo_flag_1, photo_flag_2, queue_id_1, queue_id_2, location_id):\n info_user_1 = getUserInfo(file_db, telegram_id_1)\n info_user_2 = getUserInfo(file_db, telegram_id_2)\n bot.send_message(telegram_id_1, str(\"Ура! Ты идешь пить кофе с сотрудником {0}!\".format(info_user_2[2])))\n bot.send_message(telegram_id_2, str(\"Ура! Ты идешь пить кофе с сотрудником {0}!\".format(info_user_1[2])))\n if photo_flag_2 == 1:\n img=open(\"photos/\" + str(telegram_id_2) + \".jpg\",'rb')\n bot.send_photo(telegram_id_1,img)\n if photo_flag_1 == 1:\n img=open(\"photos/\" + str(telegram_id_1) + \".jpg\",'rb')\n bot.send_photo(telegram_id_2,img)\n bot.send_message(telegram_id_1, str(\"Далее ты продолжаешь диалог с сотрудником {0}, договоритесь о времени и месте встречи! Чтобы остановить диалог, в любой момент нажми /stopchat. Чтобы найти места с кофе поблизости, нажми /showcs. Хорошей беседы!\".format(info_user_2[2])))\n bot.send_message(telegram_id_2, str(\"Далее ты продолжаешь диалог с сотрудником {0}, договоритесь о времени и месте встречи! Чтобы остановить диалог, в любой момент нажми /stopchat. Чтобы найти места с кофе поблизости, нажми /showcs. 
Хорошей беседы!\".format(info_user_1[2])))\n        add_to_couple(file_db, telegram_id_1, telegram_id_2, location_id) #Добавление пары в таблицу\n        delete_from_queue(file_db, queue_id_1, queue_id_2) #Удаление пользователей из очереди\n\n    def searchCouples():\n        conn = sqlite3.connect(file_db)\n        cursor = conn.cursor()\n        #Отбираем локации, в которых стоит более 1 в очереди\n        cursor.execute('SELECT location_id FROM queue GROUP BY location_id HAVING COUNT(*) > 1')\n        data_locations = cursor.fetchall()\n        if len(data_locations) > 0:\n            for location_id in data_locations: #Перебираем все локации, где в очереди стоят больше 1 человека\n                i = True\n                while i is True:\n                    #определяем количество в очереди\n                    cursor.execute('SELECT * FROM queue WHERE location_id=' + str(location_id[0]))\n                    data_people = cursor.fetchall()\n                    if len(data_people) > 2: # Если больше 2, то отбираем всех с фото\n                        cursor.execute('SELECT * FROM queue WHERE photo_flag=\"1\" AND location_id=' + str(location_id[0]))\n                        data_people_photo = cursor.fetchall()\n                        if len(data_people_photo) > 2: # Если больше 2, то сравниваем схожесть\n                            face_id = data_people_photo[0][5]\n                            face_ids = []\n                            for person in data_people_photo:\n                                if person[5] != face_id:\n                                    face_ids.append(person[5])\n                            face_id_second = useFaceApiFindSimilar(\"https://westcentralus.api.cognitive.microsoft.com/face/v1.0/findsimilars\",ms_token,face_id,face_ids)\n                            cursor.execute('SELECT * FROM queue WHERE face_id IN (\"'+face_id+'\", \"'+face_id_second['faceId']+'\")')\n                            data = cursor.fetchall()\n                            create_appointment(data[0][2], data[1][2], data[0][4], data[1][4], data[0][0], data[1][0], data[0][3])\n                        elif len(data_people_photo) == 2: #Если только двое с фотками, то сводим их\n                            create_appointment(data_people_photo[0][2],\n                                                data_people_photo[1][2],\n                                                data_people_photo[0][4],\n                                                data_people_photo[1][4],\n                                                data_people_photo[0][0],\n                                                data_people_photo[1][0],\n                                                data_people_photo[0][3])\n                        else: #Если меньше двух с фотками, то сводим людей без фильтра по фоткам\n                            create_appointment(data_people[0][2], data_people[1][2], data_people[0][4], data_people[1][4], data_people[0][0], data_people[1][0], data_people[0][3])\n\n                    elif len(data_people) == 2: #Сводим двоих\n                        create_appointment(data_people[0][2], data_people[1][2], data_people[0][4], data_people[1][4], data_people[0][0], data_people[1][0], data_people[0][3])\n\n                    else:\n                        i = False\n\n        #Проверка времени нахождения в очереди\n        cursor.execute('SELECT * FROM queue')\n\n        a = datetime.datetime.now()\n        for row in cursor.fetchall():\n            b = datetime.datetime.strptime(row[1], '%Y-%m-%d %H:%M:%S')\n            c = a - b - datetime.timedelta(minutes=180)\n            if c.seconds/60 > 30:\n                cursor.execute(str(\"DELETE FROM queue WHERE queue_id = '{0}'\".format(str(row[0]))))\n                conn.commit()\n                bot.send_message(row[2], \"К сожалению, пару для кофепития найти не удалось. 
Попробуй еще раз!\")\n\n    searchCouples()\n\n\ndef setInterval(interval):\n    def decorator(function):\n        def wrapper(*args, **kwargs):\n            stopped = threading.Event()\n\n            def loop(): # executed in another thread\n                while not stopped.wait(interval): # until stopped\n                    function(*args, **kwargs)\n\n            t = threading.Thread(target=loop)\n            t.daemon = True # stop if the program exits\n            t.start()\n            return stopped\n        return wrapper\n    return decorator\n\n\n#Запускаем потоки с поллером телеграма и таймером\nif __name__ == '__main__':\n    #Запускаем поток с таймером\n    @setInterval(5)\n    def function():\n        timerFunc()\n    stop = function()\n\n    #Запускаем поток с поллером телеграмма и функциями диалога\n    cbClassp = cbClass()\n    # pass the method itself; calling cbFunc() here would run bot.polling() in the main thread\n    Thread(target = cbClassp.cbFunc).start()\n" } ]
7
espin086/DataScienceToolKitInR
https://github.com/espin086/DataScienceToolKitInR
f1985f872ce5cc2955e104cb9e6e82b972a6f993
7f50165d6eb28b040a6a0d8a4db4cab4ab32fcfb
14240ff0863252d4f2d07ea39f2d62ef57b9660b
refs/heads/master
2022-06-18T15:40:44.207647
2020-05-05T17:09:58
2020-05-05T17:09:58
56,411,761
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.40297120809555054, "alphanum_fraction": 0.4196843206882477, "avg_line_length": 35.83333206176758, "blob_id": "880a49bc9515b5dfa112c16ffa3025637662320a", "content_id": "65bf90a1b5a94e6894662d83cbf037d9824dd972", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 1077, "license_type": "no_license", "max_line_length": 65, "num_lines": 30, "path": "/1. Data Visualization/Visualization Services - ScatterPlot.R", "repo_name": "espin086/DataScienceToolKitInR", "src_encoding": "UTF-8", "text": "library(ggplot2) #required for the ggplot/geom_* calls below\n\nx <- 1:100\ny <- 2*x + x*sin(x/5) + (x/2)*rnorm(100)\n\njj.scatter <- function(x,y){\n  \n  df <- as.data.frame(cbind(x,y))\n  \n  p <- ggplot(df, aes(x = x, y = y))\n  p <- p + geom_point(colour = \"black\", size = 5) \n  p <- p + geom_rug(sides=\"bl\",col=\"red\", alpha=.3)\n  p <- p + geom_smooth(method='lm',formula=y~x)\n  p <- p + geom_hline(aes(yintercept=mean(y, na.rm=T)), \n                      colour = \"red\", alpha=.3,\n                      size=1)\n  p <- p + geom_vline(aes(xintercept=mean(x, na.rm=T)), \n                      color=\"red\", alpha=.3,\n                      size=1)\n  jj_theme <- theme_bw(base_size = 12) + \n    theme(axis.line = element_line(colour = \"black\"),\n          panel.grid.major = element_blank(),\n          panel.grid.minor = element_blank(),\n          panel.border = element_blank(),\n          panel.background = element_blank())\n  \n  p <- p + jj_theme\n  \n  p\n}\n\njj.scatter(x, y)\n\n\n" }, { "alpha_fraction": 0.5490304827690125, "alphanum_fraction": 0.5529085993766785, "avg_line_length": 31.781818389892578, "blob_id": "b2fb938a01287eb8bd63a71a331afd01523399e7", "content_id": "cd6a74e37bc6f268267141b4654b7b40840b331e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 1805, "license_type": "no_license", "max_line_length": 114, "num_lines": 55, "path": "/3. Machine Learning Models/0. General Machine Learning Model/0. 
Source Code - Prep Data.R", "repo_name": "espin086/DataScienceToolKitInR", "src_encoding": "UTF-8", "text": "#####################################################\n#Sub Routines\n#####################################################\n\n#Removing zero variance function\nremove.zero <- function(dataframe){\n  library(caret)#used for removing zero variance\n  nzv <- nearZeroVar(dataframe, saveMetrics= TRUE)\n  nzv<-nzv[nzv$nzv==\"TRUE\",]\n  nzv<-row.names(nzv)\n  myvars <- names(dataframe) %in% nzv\n  #drop the near-zero-variance columns before returning\n  dataframe <- dataframe[!myvars]\n  return(dataframe)\n}\n#Center and scaling\ncenter.scale <- function(dataframe){\n  library(MASS) #used for center and scaling\n  #Centering and scaling numerical data\n  ind.n <- sapply(dataframe, is.numeric)\n  dataframe[ind.n] <- lapply(dataframe[ind.n], scale)\n  return(dataframe)\n}\n#Performing MICE Imputation\nmice.imp <- function(dataframe){\n  library(mice)\n  mice_mod <- mice(dataframe, method='rf') \n  dataframe <- complete(mice_mod)\n  return(dataframe)\n}\n#Hot Encoding all factor variables\nhot.encode <- function(dataframe){\n  dataframe <- model.matrix(~ ., data=dataframe, \n                            contrasts.arg = lapply(dataframe[,sapply(dataframe, is.factor)], contrasts, contrasts=FALSE))\n  return(dataframe)\n  \n}\n\n#####################################################\n#Main Function\n#####################################################\n\nprep.data <- function(dataframe){\n  \n  #Removing data with zero variance\n  dataframe.1 <- remove.zero(dataframe)\n  #Center Scale (chained on the previous step)\n  dataframe.2 <- center.scale(dataframe.1)\n  #Mice Imputation (chained on the previous step)\n  dataframe.3 <- mice.imp(dataframe.2)\n  #Hot Encode factors\n  #dataframe.4 <- hot.encode(dataframe.3)\n  #Returning the fully prepared dataframe\n  return(dataframe.3)\n  \n}\n\n\n" }, { "alpha_fraction": 0.6614063382148743, "alphanum_fraction": 0.6813823580741882, "avg_line_length": 43.6875, "blob_id": "630efa8ac683578ef03de1e3b6b3a576847a4a3a", "content_id": "3452ea412683bfc4f4dd1e809578438da247aa12", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 5006, "license_type": "no_license", "max_line_length": 128, "num_lines": 112, "path": "/2. Regression Models/0. Cobb-Douglas Production Models/2. 
Cobb-Douglas Cost Functions - Testing Assumptions.R", "repo_name": "espin086/DataScienceToolKitInR", "src_encoding": "UTF-8", "text": "library(miscTools)\n\n############################################\n#Estimation and analysis of Cobb-Douglas Cost Function\n\n#This production function violates the homogeneity assumption\ncostCD <- lm(log(cost) ~ log(pProps) + log(pDirector) + log(pActors) + log(qTotalView), data = dat)\nsummary(costCD)\n\n#The following variable transformation imposes homogeneity by normalizing by actors\ncostCDHom <- lm(log(cost/pActors) ~ log(pProps/pActors) + log(pDirector/pActors) + log(qTotalView), data = dat)\nsummary(costCDHom)\n\n#The following variable transformation imposes homogeneity by normalizing by props\ncostCDHomCap <- lm(log(cost/pProps) ~ log(pDirector/pProps) + log(pActors/pProps) + log(qTotalView), data = dat)\nsummary(costCDHomCap)\n \n############################################\n#Checking Assumption of Homogeneity in Input Prices\n\n#Test for homogeneity in input prices\nlibrary( \"car\" )\n#if test below have p-values greater than .10 than we do no contradict linear homogenerity in input prices, this is a good thing\nlinearHypothesis( costCD, \"log(pProps) + log(pDirector) + log(pActors) = 1\" )\nlrtest( costCDHom, costCD )\n\n############################################\n#Checking Assumption of Concavity of Input Prices with regression WITHOUT homogeneity imposed\n\ndat$costCD <- exp(fitted(costCD))\n\n#simplifying calcs by defining short-cuts for coefficients\ncProps <- coef(costCD)[\"log(pProps)\"]\ncDirector <- coef(costCD)[\"log(pDirector)\"]\ncActors <- coef(costCD)[\"log(pActors)\"]\n\n#Calculating the Hessian Matrix\nhPropsProps <- cProps * ( cProps - 1 ) * dat$costCD / dat$pProps^2\nhDirectorDirector <- cDirector * ( cDirector - 1 ) * dat$costCD / dat$pDirector^2\nhActorsActors <- cActors * ( cActors - 1 ) * dat$costCD / dat$pActors^2\nhPropsDirector <- cProps * cDirector * dat$costCD / ( dat$pProps * dat$pDirector )\nhPropsActors <- cProps * cActors * dat$costCD / ( dat$pProps * dat$pActors )\nhDirectorActors <- cDirector * cActors * dat$costCD / ( dat$pDirector * dat$pActor )\n\n#Producing the Hessian Matrix for the first observation\n\nhessian <- matrix(NA, nrow = 3, ncol = 3)\nhessian[1,1] <- hPropsProps[1]\nhessian[2,2] <- hDirectorDirector[1]\nhessian[3,3] <- hActorsActors[1]\nhessian[1,2] <- hessian[2,1] <- hPropsDirector[1]\nhessian[1,3] <- hessian[3,1] <- hPropsActors[1]\nhessian[2,3] <- hessian[3,2] <- hDirectorActors[1]\n\nprint(hessian)\n\n#Testing for concavity (aka semidefinteness) on first observation\nsemidefiniteness( hessian, positive = FALSE )#if false then we don't have \n\n#Testing for concavity (aka semidefinteness) on all observations\ndat$concaveCD <- NA\nfor( obs in 1:nrow( dat ) ) {\n hessianLoop <- matrix( NA, nrow = 3, ncol = 3 )\n hessianLoop[ 1, 1 ] <- hPropsProps[obs]\n hessianLoop[ 2, 2 ] <- hDirectorDirector[obs]\n hessianLoop[ 3, 3 ] <- hActorsActors[obs]\n hessianLoop[ 1, 2 ] <- hessianLoop[ 2, 1 ] <- hPropsDirector[obs]\n hessianLoop[ 1, 3 ] <- hessianLoop[ 3, 1 ] <- hPropsActors[obs]\n hessianLoop[ 2, 3 ] <- hessianLoop[ 3, 2 ] <- hDirectorActors[obs]\n dat$concaveCD[obs] <- semidefiniteness( hessianLoop, positive = FALSE ) }\nsum( dat$concaveCD )\n\n\n\n############################################\n#Checking Assumption of Concavity of Input Prices with regression WITH homogeneity imposed\ndat$costCDHom <- exp( fitted( costCDHom ) ) * dat$pActors\n\nchProps <- coef( costCDHom )[ \"log(pProps/pActors)\" 
]\nchDirector <- coef( costCDHom )[ \"log(pDirector/pActors)\" ]\nchActors <- 1 - chProps - chDirector\n\n\n#Calculating the Hessian Matrix (using the fitted costs from the homogeneity-imposed model)\nhhPropsProps <- chProps * ( chProps - 1 ) * dat$costCDHom / dat$pProps^2\nhhDirectorDirector <- chDirector * ( chDirector - 1 ) * dat$costCDHom / dat$pDirector^2\nhhActorsActors <- chActors * ( chActors - 1 ) * dat$costCDHom / dat$pActors^2\nhhPropsDirector <- chProps * chDirector * dat$costCDHom / ( dat$pProps * dat$pDirector )\nhhPropsActors <- chProps * chActors * dat$costCDHom / ( dat$pProps * dat$pActors )\nhhDirectorActors <- chDirector * chActors * dat$costCDHom / ( dat$pDirector * dat$pActors )\n\n#New Hessian Matrix\nhessianHom <- matrix( NA, nrow = 3, ncol = 3 )\nhessianHom[ 1, 1 ] <- hhPropsProps[1]\nhessianHom[ 2, 2 ] <- hhDirectorDirector[1]\nhessianHom[ 3, 3 ] <- hhActorsActors[1]\nhessianHom[ 1, 2 ] <- hessianHom[ 2, 1 ] <- hhPropsDirector[1]\nhessianHom[ 1, 3 ] <- hessianHom[ 3, 1 ] <- hhPropsActors[1]\nhessianHom[ 2, 3 ] <- hessianHom[ 3, 2 ] <- hhDirectorActors[1]\nprint( hessianHom )\n\nsemidefiniteness( hessianHom, positive = FALSE )#if FALSE then we don't have concavity\n\n#This shows whether concavity was violated at any observation\ndat$concaveCDHom <- NA\nfor( obs in 1:nrow( dat ) ) {\n  hessianPart <- matrix( NA, nrow = 2, ncol = 2 )\n  hessianPart[ 1, 1 ] <- hhPropsProps[obs]\n  hessianPart[ 2, 2 ] <- hhDirectorDirector[obs]\n  hessianPart[ 1, 2 ] <- hessianPart[ 2, 1 ] <- hhPropsDirector[obs]\n  dat$concaveCDHom[obs] <- semidefiniteness( hessianPart, positive = FALSE ) }\nsum(!dat$concaveCDHom) #Counts the observations where concavity is violated (0 means it holds everywhere)\n\n" }, { "alpha_fraction": 0.6549349427223206, "alphanum_fraction": 0.6618209481239319, "avg_line_length": 49.19230651855469, "blob_id": "d6535756ddddfff746a7f08651f0b6209b5eff86", "content_id": "d50daa0cfa607d94a6ce364b224287a1dee2600f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 1307, "license_type": "no_license", "max_line_length": 257, "num_lines": 26, "path": "/2. Regression Models/0. Cobb-Douglas Production Models/5. 
Cobb-Douglas Cost Functions - Marginal Cost-Average Cost-Total Cost.R", "repo_name": "espin086/DataScienceToolKitInR", "src_encoding": "UTF-8", "text": "\n#Calculating marginal cost and visualizing\nchOut <- coef( costCDHom )[ \"log(qTotalView)\" ]\ndat$margCost <- chOut * (dat$costCDHom / dat$qTotalView)\nhist(dat$margCost, breaks = 20, col = \"red\", xlab = \"Marginal Cost\", ylab = \"Number of Films\", main=\"Marginal Cost of Producing Film\")\n\n#Marginal costs are greater than the price of output; profit maximization says continue to produce until marginal cost equals margina revenue (benefit), and since marginal cost is lower than marginal revenue most firms should continue to produce more output\ncompPlot( dat$pTotalView, dat$margCost, log = \"xy\" )\n\n#Marginal cost based on the total output\nplot( dat$qTotalView, dat$margCost )\nplot( dat$qTotalView, dat$margCost, log = \"xy\" )\n\n#Total, Marginal and Average Cost Curves\ny <- seq( 0, max( dat$qTotalView), length.out = 200 )\nchInt <- coef(costCDHom)[ \"(Intercept)\" ]\ncosts <- exp( chInt + chProps * log( mean( dat$pProps ) ) + \n chDirector * log( mean( dat$pDirector ) ) + \n chActors * log( mean( dat$pActors ) ) + \n chOut*log(y))\n\nplot( y, costs, type = \"l\" )\n# average costs\nplot( y, costs/y, type = \"l\" )\n# marginal costs\nlines( y, chOut * costs / y, lty = 2 )\nlegend( \"right\", lty = c( 1, 2 ), legend = c( \"average costs\", \"marginal costs\" ) )\n\n" }, { "alpha_fraction": 0.680111289024353, "alphanum_fraction": 0.6811543703079224, "avg_line_length": 57.67346954345703, "blob_id": "9a06647baf0afcac5972e653b0625ee412650dd8", "content_id": "8f1a13225f1de24437a702db5aca4d6639458447", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 2876, "license_type": "no_license", "max_line_length": 189, "num_lines": 49, "path": "/2. Regression Models/0. Cobb-Douglas Production Models/1. 
Production Functions.R", "repo_name": "espin086/DataScienceToolKitInR", "src_encoding": "UTF-8", "text": "####################\n#Calculating average products\n\ndat$apProps<- dat$qTotalView / dat$qProps\ndat$apDirector <- dat$qTotalView / dat$qDirector\ndat$apActors <- dat$qTotalView / dat$qActors\n\nhist(dat$apProps, col = \"red\", main = \"Output Per Prop\", xlab = \"Average Product of Props\", ylab = \"Number of Films\")\n\nhist(dat$apDirector, col = \"red\", main = \"Output Per Director\", xlab = \"Average Product of Directors\", ylab = \"Number of Films\")\n\nhist(dat$apActors, col = \"red\", main = \"Output Per Actor\", xlab = \"Average Product of Actors\", ylab = \"Number of Films\")\n\n####################\n#Relationships between average products\nplot(dat$apProps, dat$apDirector, main = \"Relationship Between Average Products\", xlab = \"Average Product of Props\", ylab = \"Average Product of Director\", col = \"red\")\nplot(dat$apProps, dat$apActors, main = \"Relationship Between Average Products\",xlab = \"Average Product of Props\", ylab = \"Average Product of Actors\", col= \"red\")\nplot(dat$apActors, dat$apDirector, main = \"Relationship Between Average Products\" , xlab = \"Average Product of Actors\", ylab = \"Average Product of Director\", col = \"red\")\n\n####################\n#Average product and firm size (as imperfectly measured by output)\n\nplot(dat$qTotalView, dat$apProps, main = \"Relationship Between Average Products and Firm Size\", xlab = \"Total Viewership\", ylab = \"Average Product of Props\", col = \"red\", log = \"x\")\n\nplot(dat$qTotalView, dat$apDirector, main = \"Relationship Between Average Products and Firm Size\", xlab = \"Total Viewership\", ylab = \"Average Product of Directors\", col = \"red\", log = \"x\")\n\nplot(dat$qTotalView, dat$apActors, main = \"Relationship Between Average Products and Firm Size\", xlab = \"Total Viewership\", ylab = \"Average Product of Actors\", col = \"red\", log = \"x\")\n\n####################\n#Examining Total Factor Productivity\n\n#TFP by firm\ndat$tfp <- dat$qTotalView / dat$X #output / index of inputs\nhist(dat$tfp, col = \"red\", main = \"Total Factor Productivity of Films\", xlab = \"TFP\", ylab = \"Number of Films\")\n\n#TFP by output\nplot(dat$qTotalView, dat$tfp, main = \"Relationship Between TFP and Film Size\", xlab = \"Total Viewership\", ylab = \"TFP\", col = \"red\", log = \"x\")\n\n\n#TFP by input\nplot(dat$X, dat$tfp, main = \"Relationship Between TFP and Film Size\", xlab = \"Index of Inputs\", ylab = \"TFP\", col = \"red\", log = \"x\")\n\n####################\n#Some evidence that advisory services help\nboxplot( tfp ~ adv, data = dat, main = \"TFP Differences for Films with Consultant\", xlab = \"1 = Film with Consultant\", ylab= \"TFP\", col = \"red\")\n\nboxplot( log(qTotalView) ~ adv, data = dat, main = \"Viewership Differences for Films with Consultant\", xlab = \"1 = Film with Consultant\", ylab= \"Total Views\", col = \"red\")\n\nboxplot( log(X) ~ adv, data = dat, main = \"Input Differences for Films with Consultant\", xlab = \"1 = Film with Consultant\", ylab= \"Index of Inputs\", col = \"red\")\n\n" }, { "alpha_fraction": 0.6276252269744873, "alphanum_fraction": 0.6324717402458191, "avg_line_length": 29.121952056884766, "blob_id": "5acebed5248f12b156c1bc6bfe4478f793477179", "content_id": "1da3d90a26156e5d999fba8ef33eb869b6a94870", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 1238, "license_type": "no_license", "max_line_length": 
108, "num_lines": 41, "path": "/3. Machine Learning Models/0. General Machine Learning Model/0. Main.R", "repo_name": "espin086/DataScienceToolKitInR", "src_encoding": "UTF-8", "text": "library(reshape)\n\n\n#Function for preparing data = prep.data\nsource(\"/Users/jjespinoza/Documents/my-toolbox/3. Machine Learning Models/0. Source Code - Prep Data.R\")\n\n#Function for Machine Learning = Models\nsource(\"/Users/jjespinoza/Documents/my-toolbox/3. Machine Learning Models/0. Source Code - ML Algorithms.R\")\n\nsetwd(\"/Users/jjespinoza/Documents/Kaggle - Titanic Disaster/2. Data\")\ndf <- read.csv(\"train (1).csv\")\n\n########################################\n#PREPARING DATA USING SOURCE CODE\n########################################\n\n#Identifying target variables\ntarget <- df[\"Survived\"]\n#Removing target and useless features\nmyvars <- names(df) %in% c(\"PassengerId\", \"Survived\", \"Name\", \"Ticket\", \"Cabin\") \ndf <- df[!myvars]\n#Correcting variable types\ndf$Pclass <- as.factor(df$Pclass)\n\n##############\n#Feature engineering\ndf <- prep.data(dataframe = df) #prep.data takes only the feature dataframe; the target is re-attached below\n\n#Merging target with features\ndf <- cbind(df, target)\n#Specifying target variable\ndf$Survived <- as.factor(as.character(df$Survived))\n#Changing name to identify target\ndf <- rename(df, c(Survived=\"target\"))\n\n\n########################################\n#BUILDING MACHINE LEARNING MODELS\n########################################\n\nmodels <- run.models(df, df)\n\n\n\n" }, { "alpha_fraction": 0.6530092358589172, "alphanum_fraction": 0.664814829826355, "avg_line_length": 33.560001373291016, "blob_id": "cd00aa0768d11a395c2f5a7895f62001aeb88d35", "content_id": "d64d31416aedfc834a4db8a792d03de8f97d7ffb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 4324, "license_type": "no_license", "max_line_length": 109, "num_lines": 125, "path": "/2. 
Regression Models/diagnostic - panel.R", "repo_name": "espin086/DataScienceToolKitInR", "src_encoding": "UTF-8", "text": "#Based the Princeton University Panel Regression Diagnostics document\n#http://www.princeton.edu/~otorres/Panel101R.pdf\n\n#################################\n#Panel Regression Diagnostic\nlibrary(plm)\n\ndata(\"Grunfeld\", package = \"AER\")\n\n#Exploring panel data\ncoplot(invest ~ year | firm, type=\"l\", data = Grunfeld)\nlibrary(car)\nscatterplot(invest ~ year | firm, \n smooth = TRUE, \n reg.line = FALSE, \n data = Grunfeld)\n\nlibrary(gplots)\nplotmeans(invest ~ firm, \n main=\"Heterogeineity across firms\", \n data=Grunfeld)\n\nplotmeans(invest ~ year, \n main=\"Heterogeineity across years\", \n data=Grunfeld)\n\n###################################\n#OLS Regression Model\nols <-lm(log(invest) ~ log(value) + log(capital), \n data=Grunfeld)\nsummary(ols)\n\n\n###################################\n#Fixed Effects Model\nfixed <- plm(log(invest) ~ log(value) + log(capital), \n data=Grunfeld, \n index=c(\"firm\", \"year\"), \n model=\"within\")\n\nsummary(fixed)\n\n#displaying fixed effects for each country\nfixef(fixed)\n\n#if p-value less than 0.05 then fixed effects is a better choice\npFtest(fixed, ols) \n\n###################################\n#Random Effects Model\nrandom <- plm(log(invest) ~ log(value) + log(capital), \n data=Grunfeld, \n index=c(\"firm\", \"year\"), \n model=\"random\")\n\nsummary(random)\n\n###################################\n#deciding between fixed and random effects\n#if p-value is less than 0.05 then used fixed effects\nphtest(fixed, random)\n\n###################################\n#Other diagnostic tests\n\n#Testing for time fixed effects, if p-value is less than 0.05 than use fixed effects\nfixed.time <- plm(log(invest) ~ log(value) + log(capital) + factor(year), \n data=Grunfeld, \n index=c(\"firm\",\"year\"), model=\"within\")\nsummary(fixed.time)\n\npFtest(fixed.time, fixed)\n\n#Random Effects or OLS\n#if p-value is less than 0.05 then use Random Effects\npool <- plm(log(invest) ~ log(value) + log(capital), \n data=Grunfeld, index=c(\"firm\", \"year\"), \n model=\"pooling\")\nsummary(pool)\nplmtest(pool, type=c(\"bp\"))\n\n\n#Testing for cross section dependence\n#if p is less than 0.05 then we have cross sectional independence\npcdtest(fixed, test = c(\"lm\"))\n\n#Testing for serial correlation\n#if p is less than 0.05 than there is serial correlation\npbgtest(fixed)\n\n#Testing for unit root/stationary\n#If p-value < 0.05 then no unit roots present.\nPanel.set <- plm.data(Grunfeld, index = c(\"firm\", \"year\"))\nlibrary(tseries)\nadf.test(Panel.set$invest, k=2)\n\n#Testing for heteroskedasticity\n#If p-value < 0.05 heteroskedasticity is present\nlibrary(lmtest)\nbptest(log(invest) ~ log(value) + log(capital) + factor(firm), \n data = Grunfeld, studentize=F)\n\n#The --vcovHC– function estimates three heteroskedasticity-consistent covariance estimators:\n #\"white1\" - for general heteroskedasticity but no serial correlation. Recommended for random effects.\n#\"white2\" - is \"white1\" restricted to a common variance within groups. Recommended for random effects.\n#\"arellano\" - both heteroskedasticity and serial correlation. Recommended for fixed effects.\n\n#The following options apply*:\n #HC0 - heteroskedasticity consistent. The default.\n #HC1,HC2, HC3 – Recommended for small samples. 
HC3 gives less weight to influential observations.\n  #HC4 - small samples with influential observations\n  #HAC - heteroskedasticity and autocorrelation consistent (type ?vcovHAC for more details)\n\n#Heteroskedasticity for Random Effects\ncoeftest(random) \ncoeftest(random, vcovHC) # Heteroskedasticity consistent coefficients\ncoeftest(random, vcovHC(random, type = \"HC3\")) # Heteroskedasticity consistent coefficients, type 3\nt(sapply(c(\"HC0\", \"HC1\", \"HC2\", \"HC3\", \"HC4\"), function(x) sqrt(diag(vcovHC(random, type = x)))))\n\n#Heteroskedasticity for Fixed Effects\ncoeftest(fixed)\ncoeftest(fixed, vcovHC) # Heteroskedasticity consistent coefficients\ncoeftest(fixed, vcovHC(fixed, method = \"arellano\")) # Heteroskedasticity consistent coefficients (Arellano)\ncoeftest(fixed, vcovHC(fixed, type = \"HC3\")) # Heteroskedasticity consistent coefficients, type 3\nt(sapply(c(\"HC0\", \"HC1\", \"HC2\", \"HC3\", \"HC4\"), function(x) sqrt(diag(vcovHC(fixed, type = x)))))\n" }, { "alpha_fraction": 0.38260868191719055, "alphanum_fraction": 0.3990338146686554, "avg_line_length": 29.441177368164062, "blob_id": "8046de411f22f84157a61c3f0a45695c1c7ce05a9", "content_id": "32947e759c263c01ce6e9832d821d9b321e55743", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 1035, "license_type": "no_license", "max_line_length": 71, "num_lines": 34, "path": "/1. Data Visualization/Visualization Services - Line Plot.R", "repo_name": "espin086/DataScienceToolKitInR", "src_encoding": "UTF-8", "text": "library(ggplot2) #required for the ggplot/geom_* calls below\n\nx <- 1:100\ny <- x + x*sin(x/5) + (x/2)*rnorm(100)\n\ndf <- as.data.frame(cbind(x,y))\n\njj.line <- function(x,y){\n  p <- ggplot(df, aes(x = x, y = y))\n  p <- p + geom_line(colour = \"light grey\") \n  p <- p + geom_rug(sides=\"l\",col=\"red\" ,alpha=.3)\n  \n  \n  p <- p + geom_hline(aes(yintercept=mean(y)), \n                      colour = \"red\", \n                      size=2, alpha=.3)\n  \n  p <- p + annotate(\"text\", \n                    x = 10, \n                    y = mean(y), \n                    label = paste(\"Mean: \", round(mean(y), digits=1)))\n  \n  \n  jj_theme <- theme_bw(base_size = 12) + \n    theme(axis.line = element_line(colour = \"black\"),\n          panel.grid.major = element_blank(),\n          panel.grid.minor = element_blank(),\n          panel.border = element_blank(),\n          panel.background = element_blank())\n  \n  p <- p + jj_theme\n  \n  p\n}\n\njj.line(df$x, df$y)\n" }, { "alpha_fraction": 0.6160188317298889, "alphanum_fraction": 0.6448763012886047, "avg_line_length": 30.425926208496094, "blob_id": "9082c85421554d497337ee595fb64b8ed2342722", "content_id": "b66f71b92dba4e5d6ea78213a16e80f2916cd23a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 1698, "license_type": "no_license", "max_line_length": 108, "num_lines": 54, "path": "/4. 
Optimization Models/Marketing Optimization with Reach.R", "repo_name": "espin086/DataScienceToolKitInR", "src_encoding": "UTF-8", "text": "\n################################################################\n\n\n#Importing data from Github\n\nlibrary(RCurl)\nlibrary(foreign)\n\n\nurl <- \"https://docs.google.com/spreadsheets/d/1Okuj7zW1yTibQ4jK_3eppBis7OYVp0LaFr3qXNeTbCs/pub?output=csv\"\ncost.reach <- getURL(url) \ncost.reach <- read.csv(textConnection(cost.reach), header = TRUE, skip =0)\n\n\nurl2 <- \"https://docs.google.com/spreadsheets/d/1xREig0Fmk7S7g9OxSVpD-o3cAO3ZzHofIwU289AHs00/pub?output=csv\"\ntarget <- getURL(url2) \ntarget <- read.csv(textConnection(target), header = TRUE, skip =0)\n\n\n################################################################\n\n#######################\n#Objective Function\ncost <- c(cost.reach[,2])\n\n#######################\n#Constraints\n\n#1. The goal is to reach 24 million boys cheaply, while meeting all other goals.\nrhs.constraint<- c(target[1,2])*-1\nlhs.constraint <- rbind(c(cost.reach[,3])*-1)\n\n\n#2. The goal is to reach 18 million women cheaply, while meeting all other goals.\nrhs.woman.constraint<- c(target[2,2])*-1\nrhs.constraint <- rbind(rhs.constraint, rhs.woman.constraint)\nlhs.woman.constraint <- rbind(c(cost.reach[,4])*-1)\nlhs.constraint <- rbind(lhs.constraint, lhs.woman.constraint)\n\n#3. The goal is to reach 24 million men cheaply, while meeting all other goals.\nrhs.men.constraint<- c(target[3,2])*-1\nrhs.constraint <- rbind(rhs.constraint, rhs.men.constraint)\nlhs.men.constraint <- rbind(c(cost.reach[,5])*-1)\nlhs.constraint <- rbind(lhs.constraint, lhs.men.constraint)\n\n#######################\n#Solving the Linear Programming Problem\nlibrary(linprog)\nanswer <- solveLP(cost, rhs.constraint, lhs.constraint, maximum = FALSE)\n\n\nanswer$opt\nanswer$solution\nanswer$con\n" }, { "alpha_fraction": 0.5823429822921753, "alphanum_fraction": 0.6086587309837341, "avg_line_length": 23.06122398376465, "blob_id": "2d2050f8236fccdce8dd452fba141b95368d55e1", "content_id": "6042f1658097737396946641834ff0044539661d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 1178, "license_type": "no_license", "max_line_length": 135, "num_lines": 49, "path": "/5. Data Products/0. NBC Early HQ App/server.R", "repo_name": "espin086/DataScienceToolKitInR", "src_encoding": "UTF-8", "text": "library(shiny)\nlibrary(UsingR)\nlibrary(caret)\n\n#Reading in the training\nsetwd(\"~/Documents/my-shiny-tools/1. Prediction App/\")\ndf <- read.csv(\"regressionraw_2016_03.csv\")\n\n#Removing film title & interaction terms\nmyvars <- names(df) %in% c(\"Film.Title\", \"Box...Days.to.HQ\") \ndf.clean <- df[!myvars]\n\n#Building Prediction Model\nfit.knn <- train(WW210 ~.,method=\"knn\",data=df.clean)\n\n#############################\n#Prediction Function\n\nDays.to.HQ <- 10\nWW.Box <- 10000\nHas.CAM. 
<- 1\nA <- 1\nAnimation <- 1\nAdventure <- 0\nComedy <- 0\nDrama <- 0\nRomantic.Comedy <- 0\nHorror.Thriller <- 0\nPG.13 <- 0\nR <- 1\n\ninputs <- data.frame(Days.to.HQ, WW.Box, Has.CAM., A, Animation, Adventure, Comedy, Drama, Romantic.Comedy, Horror.Thriller, PG.13, R )\n\n\n#Creating a function that takes inputs and uses fitted model to predict outcome\nmodel <- function(x) {predict(fit.knn, x)}\n\n\nshinyServer(\n function(input, output){\n \n output$DaystoHQ <- renderPrint({input$DaystoHQ})\n output$Genre <- renderPrint({input$Genre})\n \n output$Prediction <- renderPrint(model(inputs))\n \n \n }\n)" }, { "alpha_fraction": 0.5358255505561829, "alphanum_fraction": 0.5467289686203003, "avg_line_length": 29.547618865966797, "blob_id": "68446a9c66493cf5ccc2f789f96c8bae7f1adfea", "content_id": "4f40f95be197ef1872a4070fc56dc8610c9bd3d5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 1284, "license_type": "no_license", "max_line_length": 121, "num_lines": 42, "path": "/6. Web Tools/Scaper-BOM-Actors.R", "repo_name": "espin086/DataScienceToolKitInR", "src_encoding": "UTF-8", "text": "library(XML)\n\n# Step 1: construct URLs\nurls <- paste(\"http://www.boxofficemojo.com/people/?view=Actor&pagenum=\", 1:3, \"&sort=person&order=ASC&p=.htm\", sep = \"\")\n\n# Step 2: scrape website\nget_table <- function(u) {\n table <- readHTMLTable(u)[[3]]\n names(table) <- c(\"Actor\", \"Total.Gross\", \"Num.Movies\", \"Avg.Per.Movie\", \"No.1 Movie\", \"Gross.of.No.1.Movie\")\n df <- as.data.frame(lapply(table[-1, ], as.character), stringsAsFactors=FALSE)\n df <- as.data.frame(df, stringsAsFactors=FALSE)\n return(df)\n}\n\ndf <- do.call(\"rbind\", lapply(urls, get_table))\n\n\n# Step 3: clean dataframe\nclean_df <- function(df) {\n clean <- function(col) {\n col <- gsub(\"$\", \"\", col, fixed = TRUE)\n col <- gsub(\"%\", \"\", col, fixed = TRUE)\n col <- gsub(\",\", \"\", col, fixed = TRUE)\n col <- gsub(\"^\", \"\", col, fixed = TRUE)\n return(col)\n }\n \n df <- sapply(df, clean)\n df <- as.data.frame(df, stringsAsFactors=FALSE)\n return(df)\n}\ndf <- clean_df(df) \n\n\n# Step 4: set column types\ns <- c(2:4, 6)\ndf[, s] <- sapply(df[, s], as.numeric)\n\ndf$Studio <- as.factor(df$Studio)\n\nsetwd(\"~/Documents/my-toolbox/5. Web Tools\")\nwrite.csv(df, \"Scraper-BOM-Actors.csv\")\n\n" }, { "alpha_fraction": 0.7022613286972046, "alphanum_fraction": 0.7022613286972046, "avg_line_length": 20.486486434936523, "blob_id": "c72e4e40ef3fe85c767fb4e3a3e3970b272fbd45", "content_id": "e07a83005acd18aa08dd04308003da4ecdeba2ea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 796, "license_type": "no_license", "max_line_length": 80, "num_lines": 37, "path": "/0. Getting and Cleaning Data/1. 
Box Office Mojo Scrapers/Scraping-IMBD.R", "repo_name": "espin086/DataScienceToolKitInR", "src_encoding": "UTF-8", "text": "\nlibrary(dplyr)\nlibrary(pbapply)\nlibrary(omdbapi)\n\n#Reading in data with list of movies to examine\nsetwd(\"~/Desktop\")\n\ntitle.key <- read.csv(\"titlekey.csv\", stringsAsFactors = FALSE)\ntitle.key <- title.key[which(title.key$imdbid != \"\"),]\nimdbid <- as.vector(title.key$imdbid)\n\n#Feeding IMBD ID Numbers into function that will find movie info\ntitle.info <- lapply(imdbid, find_by_id)\ndf <- data.frame(matrix(unlist(title.info), nrow = length(title.info), byrow=T))\n\n#Renaming the columns with the appropriate titles from title.info list\nnames(df) <- c( \"Title\",\n\"Year\",\n\"Rated\",\n\"Released\",\n\"Runtime\",\n\"Genre\",\n\"Director\",\n\"Writer\",\n\"Actors\",\n\"Plot\",\n\"Language\",\n\"Country\",\n\"Awards\",\n\"Poster\",\n\"Metascore\",\n\"imdbRating\",\n\"imdbVotes\",\n\"imdbID\",\n\"Type\")\n\nwrite.csv(df, \"IMBD Rating - OMBD API.csv\")\n" }, { "alpha_fraction": 0.723809540271759, "alphanum_fraction": 0.7408163547515869, "avg_line_length": 42.235294342041016, "blob_id": "2c868519b6b1ef4825f5e2cc2d0d29ad71153642", "content_id": "c4ef5a0e7ab83e714da806b37879073a058b5709", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 1470, "license_type": "no_license", "max_line_length": 81, "num_lines": 34, "path": "/2. Regression Models/0. Cobb-Douglas Production Models/4. Cobb-Douglas Cost Functions - Conditional Demand Elasticities.R", "repo_name": "espin086/DataScienceToolKitInR", "src_encoding": "UTF-8", "text": "#Deriving the demand input elasticities - WARNING ASSUMES COBB-DOUGLAS\n#\n###############################\n#Input elasticity for change in PROP price by 1%\ne.props <- list()\ne.props[\"own.elasticity\"] <-as.numeric(costCD$coefficients[2] - 1)\ne.props[\"cross.price.elasticity.directors\"] <-as.numeric(costCD$coefficients[2])\ne.props[\"cross.price.elasticity.actors\"] <-as.numeric(costCD$coefficients[2])\ne.props\n\n#Input elasticity for change in DIRECTOR price by 1%\ne.directors <- list()\ne.directors[\"own.elasticity\"] <-as.numeric(costCD$coefficients[3] - 1)\ne.directors[\"cross.price.elasticity.props\"] <-as.numeric(costCD$coefficients[3])\ne.directors[\"cross.price.elasticity.actors\"] <-as.numeric(costCD$coefficients[3])\ne.directors\n\n#Input elasticity for change in ACTORS price by 1%\ne.actors <- list()\ne.actors[\"own.elasticity\"] <-as.numeric(costCD$coefficients[4] - 1)\ne.actors[\"cross.price.elasticity.directors\"] <-as.numeric(costCD$coefficients[4])\ne.actors[\"cross.price.elasticity.actors\"] <-as.numeric(costCD$coefficients[4])\ne.actors\n\n\n#Input elasticity for change in output by 1%\ne.output <- list()\ne.output[\"cross.price.elasticity.props\"] <-as.numeric(costCD$coefficients[5])\ne.output[\"cross.price.elasticity.directors\"] <-as.numeric(costCD$coefficients[5])\ne.output[\"cross.price.elasticity.actors\"] <-as.numeric(costCD$coefficients[5])\ne.output\n\n#Elasticity of size - if cost increase by 1% then output would increase by 2.67%\n1/as.numeric(costCD$coefficients[5])\n" }, { "alpha_fraction": 0.6269209384918213, "alphanum_fraction": 0.655804455280304, "avg_line_length": 38.43065643310547, "blob_id": "94df9ef2ec56664eaa778379233ce946fefa60a4", "content_id": "38a38767046166b9d3c752e9835767223d24ee57", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 5401, "license_type": "no_license", "max_line_length": 
452, "num_lines": 137, "path": "/4. Optimization Models/RegressionFeedsIntoLP.R", "repo_name": "espin086/DataScienceToolKitInR", "src_encoding": "UTF-8", "text": "######################################################\n#The following code performs a marketing mix model\n #1) Measures marketing effectiveness\n #2) Uses effective measures along with costs in optimization\n######################################################\n\n#Importing marketing and sales data\nsetwd(\"~/Documents/Primer on Linear Programming/2. Simple Example - Data and Code\")\ndf <- read.csv(\"advertising.csv\")\n\nmodel <- lm(Sales ~ TV + Radio, data = df) #Modeling marketing's impact on sales\nsummary(model)\n\n\n######################################################\n#1) Establishing the 'Base' optimization\nlibrary(linprog)\n\n#Objective - maximize sales by pulling the ratio & TV levers\ncvec <- c(Radio = model[[1]][[3]], \n TV = model[[1]][[2]]) \n\n## Constraints (quasi-fix factors)\nbvec <- c( RadioBudget = 23.275,\n TVBudget = 147.04,\n TotalBudget = 170.315) #Add or remove 3.65 for marketing mix problem\nbvec\n\n## Needs of Production activities\nAmat <- matrix(0, length(bvec), length(cvec))\nrownames(Amat) <- names(bvec)\ncolnames(Amat) <- names(cvec)\n\n\n#specifying LHS of radio budget constraint\nAmat[\"RadioBudget\", \"Radio\"] <- 1\nAmat[\"RadioBudget\", \"TV\"] <- 0\n\n#specifying LHS of TV budget constraint\nAmat[\"TVBudget\", \"Radio\"] <- 0\nAmat[\"TVBudget\", \"TV\"] <- 1\n\n#specifying LHS of Total budget constraint\nAmat[\"TotalBudget\", \"Radio\"] <- 1\nAmat[\"TotalBudget\", \"TV\"] <- 1\n\nAmat\n\n#Solving the linear program\nlp <- solveLP(cvec, bvec, Amat, maximum = TRUE )\nlp$solution\nlp$opt + model[[1]][[1]] #Adding intercept value to model results\n\n#As we can see when we use the mean inputs we get the mean output from the data from the optimization results\nsummary(df)\n\n######################################################\n#Q: You mention to the marketing department that Radio 3 times as effective as TV, so they ask you what would happen if they kept the total budget the same but let the Radio budget go up by 20%?\n\n\n######################################################\n#Q: You mention to the marketing department that Radio 3 times as effective as TV, so they ask you what would happen if they kept the total budget the same but let the Radio budget go up by 20%?\n\nbvec.2 <- c( RadioBudget = 23.275*1.2,\n TVBudget = 147.04,\n TotalBudget = 170.315)\n\nbvec.2\nbvec\n\n#A we see that sales goes up by 5% and there is a rebalancing inputs \nlp.2 <- solveLP(cvec, bvec.2, Amat, maximum = TRUE )\nlp.2$solution\nlp.2$opt + model[[1]][[1]]\nsummary(df)\n\n\n######################################################\n#Q: It seems that other companies have noticed the effectiveness of radio ads on their sales and started purchasing additional ads. 
Radio stations faced with rising demand decide to double their prices, what is our response to maximize sales?\n\n## Needs of Production activities\nAmat.2 <- matrix(0, length(bvec), length(cvec))\nrownames(Amat.2) <- names(bvec)\ncolnames(Amat.2) <- names(cvec)\n\n#specifying LHS of radio budget constraint\nAmat.2[\"RadioBudget\", \"Radio\"] <- 2\nAmat.2[\"RadioBudget\", \"TV\"] <- 0\n\n#specifying LHS of TV budget constraint\nAmat.2[\"TVBudget\", \"Radio\"] <- 0\nAmat.2[\"TVBudget\", \"TV\"] <- 1\n\n#specifying LHS of Total budget constraint\nAmat.2[\"TotalBudget\", \"Radio\"] <- 2\nAmat.2[\"TotalBudget\", \"TV\"] <- 1\n\nAmat.2\n\n#We continue to invest in radio, but less than before; our sales decrease by 14% as our budget doesn't go as far as it used to\nlp.3 <- solveLP(cvec, bvec.2, Amat.2, maximum = TRUE )\nlp.3$solution\nlp.3$opt + model[[1]][[1]]\nsummary(df)\n\n######################################################\n#Q: TV stations realize that the popularity of radio is increasing and cannibalizing their business, so they run a few focus groups and find out that people love ads with babies and puppies in them. A year later you analyze the effectiveness of TV ads and see that TV ads are now driving 150% more sales than they used to; the new content has made TV ads more effective. Given the current market what should be done and how is the business impacted?\n\ncvec.2 <- c(Radio = model[[1]][[3]], \n TV = model[[1]][[2]]*2.50) \ncvec.2\n\nlp.4 <- solveLP(cvec.2, bvec.2, Amat.2, maximum = TRUE )\nlp.4$solution\nlp.4$opt + model[[1]][[1]]\n\nsummary(df)\n\n\n######################################################\n#Q: The CMO of marketing is happy with the job you've done so far and asks you whether or not the company should diversify its advertising to include newspapers. He connects you with IT who provides you marketing data on newspapers and tells you that he wants to increase the budget by 25% to accommodate the new tactic.\n\nmodel.2 <- lm(Sales ~ TV + Radio + Newspaper, data = df) \nsummary(model.2)\n\n#A: You run the model and find that newspapers do not lift sales in a statistically significant manner. However, you realize that if there is extra budget, it may be wise to quantify what impact the increased budget will have on sales given existing marketing channels (TV, Radio)\n\nbvec.3 <- c( RadioBudget = 23.275*1.2*1.25,\n TVBudget = 147.04*1.25,\n TotalBudget = 170.315*1.25)\n\nlp.5 <- solveLP(cvec.2, bvec.3, Amat.2, maximum = TRUE )\nlp.5$opt + model[[1]][[1]]\nlp.5$solution\nsummary(df)\n\n#A Part 2: You bring back the updated results and recommendations and the CMO is happy to avert wasteful spending on newspapers and instead follows your recommendations, proceeding to another great year" }, { "alpha_fraction": 0.6732085943222046, "alphanum_fraction": 0.6639383435249329, "avg_line_length": 33.96721267700195, "blob_id": "3282367f2d50656c6e60a5810a2be6e0c2c8f582", "content_id": "461a3b03ce95c14bca72b37c12e76e2c66aaf2d4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 2077, "license_type": "no_license", "max_line_length": 142, "num_lines": 61, "path": "/3. Machine Learning Models/1. Text Classification/1. 
Data Cleaning.R", "repo_name": "espin086/DataScienceToolKitInR", "src_encoding": "UTF-8", "text": "#WARNING: Convert all data into text and ensure ANSI encoding can use this site to convert: http://utils.paranoiaworks.org/diacriticsremover/\n\n\n#init\nlibs<- c(\"tm\", \"plyr\", \"class\", \"reshape\")\nlapply(libs, require, character.only = TRUE)\n\n\n#set options\noptions(stringsAsFactors = FALSE)\n\n#set parameters\n#1. Not Contacted, 2. Phone Screened, 3. In Person Interview\n#0. Test 1, 0. Test 2\ncandidates <- c(\"1. Not Contacted\",\"4. Phone or In Person\") \npathname <- \"/Users/jjespinoza/Documents/Text Classification - Resumes/1. Data\"\n\n\n#Clean text\ncleanCorpus <- function(corpus){\n corpus.tmp <- tm_map(corpus, removePunctuation)\n corpus.tmp <- tm_map(corpus.tmp, stripWhitespace)\n #corpus.tmp <- tm_map(corpus.tmp, tolower)\n corpus.tmp <- tm_map(corpus.tmp, content_transformer(tolower))\n corpus.tmp <- tm_map(corpus.tmp, removeWords, stopwords(\"english\"))\n return(corpus.tmp)\n}\n\n#Build a Term-Document-Matrix(TDM)\ngenerateTDM <- function(cand, path){\n s.dir <- sprintf(\"%s/%s\", path, cand)\n #s.cor <- Corpus(DirSource(directory = s.dir, encoding = \"ANSI\"))\n s.cor <- VCorpus(DirSource(directory = s.dir), readerControl = list(reader=readPlain))\n s.cor.cl <- cleanCorpus(s.cor)\n s.tdm <- TermDocumentMatrix(s.cor.cl)\n s.tdm <- removeSparseTerms(s.tdm, 0.7)\n result <- list(name = cand, tdm = s.tdm)\n}\n\ntdm <- lapply(candidates, generateTDM, path = pathname)\n\n#attach interview result to matrix\nbindCandidateToTDM <- function(tdm){\n s.mat <- t(data.matrix(tdm[[\"tdm\"]]))\n s.df <- as.data.frame(s.mat, stringsAsFactors = FALSE)\n s.df <- cbind(rep(tdm[[\"name\"]], nrow(s.df)), s.df)\n #colnames(s.df)[ncol(s.df)] <- \"interviewresult\"\n}\n\ncandTDM <- lapply(tdm, bindCandidateToTDM)\n#stack\ntdm.stack <- do.call(rbind.fill, candTDM)\ntdm.stack[is.na(tdm.stack)] <- 0\n\n#Renaming target variable \ncolnames(tdm.stack)[1] <- \"target\"\n\n#Exporting Clean Dataset\nsetwd(\"~/Documents/Text Classification - Resumes/1. Data\")\n\nwrite.csv(tdm.stack, \"TDM.csv\", row.names = FALSE)\n\n\n\n\n\n" }, { "alpha_fraction": 0.39635035395622253, "alphanum_fraction": 0.407299280166626, "avg_line_length": 30.136363983154297, "blob_id": "125c0dba3307b44b77196b90737a682e4d1995c4", "content_id": "60476e65eb9f7a27f855b060192117d2eb78ed87", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 1370, "license_type": "no_license", "max_line_length": 76, "num_lines": 44, "path": "/1. 
Data Visualization/Visualization Services - Histogram.R", "repo_name": "espin086/DataScienceToolKitInR", "src_encoding": "UTF-8", "text": "###################\n#JJ Visualization Library\n\n#Histogram\nx <- rnorm(1000)\n\n\njj.hist <- function(x){\n \n \n x <- as.numeric(x)\n bw <- (2 * IQR(x) / length(x)^(1/3))\n \n library(ggplot2) \n x <- as.data.frame(x)\n \n #Creating the histogram with density plot, rugs, and mean displayed\n m <- ggplot(x, aes(x = x))\n m <- m + geom_histogram(colour = \"white\",\n fill = \"light grey\", \n binwidth = bw)\n m <- m + geom_rug(sides=\"b\",col=\"red\" ,alpha=.3)\n m <- m + geom_vline(aes(xintercept=mean(x, na.rm=T)), \n color=\"red\", size=2,alpha=.3)\n m <- m + annotate(\"text\", \n x = mean(x$x), \n y = 0, \n label = paste(\"Mean: \", round(mean(x$x), digits=1)))\n \n #Adding the plot theme\n jj_theme <- theme_bw(base_size = 12) + \n theme(axis.line = element_line(colour = \"black\"),\n panel.grid.major = element_blank(),\n panel.grid.minor = element_blank(),\n panel.border = element_blank(),\n panel.background = element_blank()) \n \n m <- m + jj_theme \n \n m\n \n}\n\njj.hist(x)\n" }, { "alpha_fraction": 0.3094170391559601, "alphanum_fraction": 0.3094170391559601, "avg_line_length": 26.375, "blob_id": "b7e48d9b7e6d60d097d0a6ea6121bd1d51ce6ab9", "content_id": "8715eec7fb2adaa72938078160a0190f9bd3f95f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 446, "license_type": "no_license", "max_line_length": 57, "num_lines": 16, "path": "/5. Data Products/0. App Template/App.R", "repo_name": "espin086/DataScienceToolKitInR", "src_encoding": "UTF-8", "text": "library(shiny)\n\n#########################################################\n#UI\nui <- pageWithSidebar(headerPanel(\"Test\"),\n sidebarPanel(),\n mainPanel()\n )\n\n#########################################################\n#Server \nserver <- function(input, output){}\n\n#########################################################\n#Shiny App Function\nshinyApp(ui = ui, server = server)\n " }, { "alpha_fraction": 0.42473119497299194, "alphanum_fraction": 0.4313022792339325, "avg_line_length": 26.850000381469727, "blob_id": "e724cc0482fadd08622170134d13f34cb8e2dee4", "content_id": "c2146dec1f5056f2d481f14a5c3ff565c2f4d588", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 1674, "license_type": "no_license", "max_line_length": 74, "num_lines": 60, "path": "/3. Machine Learning Models/0. General Machine Learning Model/0. 
Source Code - ML Algorithms.R", "repo_name": "espin086/DataScienceToolKitInR", "src_encoding": "UTF-8", "text": "library(caret) #used to train models\nlibrary(doMC)\nregisterDoMC(cores = 4)\n\n#######################################\n#Training Methodology\nset.seed(2015)\nfitControl <- trainControl(## 10-fold CV\n method = \"repeatedcv\",\n number = 3,\n ## repeated ten times\n repeats = 10)\n\n\n#######################################\n#Machine Learning Models\n\n#List of models to use based on top algorithms used in Kaggle Competitions\n#model.list <- c(\"xgbTree\", \"rf\", \"nnet\" , \"glm\")\n\n#for (i in 1:length(model.list)){\n #cat(model.list[i]) <- train(target ~ ., \n #method=model.list[i],\n #trControl = fitControl, \n #data = training)\n#}\n\n\nrun.xgbTree <- function(training, target){\n xgbTree <- train(target ~ ., \n method=\"xgbTree\",\n trControl = fitControl, \n data = training) \n xgbTree <- list(xgbTree)\n return(scored)\n}\n\nrun.models <- function(training, target){\n\n\n \n rf <- train(target ~ ., \n method=\"rf\",\n trControl = fitControl, \n data = training)\n \n nnet <- train(target ~ ., \n method=\"nnet\",\n trControl = fitControl, \n data = training)\n \n glm <- train(target ~ ., \n method=\"glm\",\n trControl = fitControl, \n data = training)\n \n scored <- list(xgbTree, rf , nnet, glm)\n return(scored)\n\n}\n\n\n\n" }, { "alpha_fraction": 0.6459110379219055, "alphanum_fraction": 0.6720229387283325, "avg_line_length": 25.386363983154297, "blob_id": "92cdc55a0a5a3a64fb643ed48a037316a88576ba", "content_id": "6881f9d4071dd0d37e424b7a6f0ccc993a173f97", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 6970, "license_type": "no_license", "max_line_length": 93, "num_lines": 264, "path": "/0. Getting and Cleaning Data/00. 
Importing Different Data into R.R", "repo_name": "espin086/DataScienceToolKitInR", "src_encoding": "UTF-8", "text": "\n###############################################################\n#Downloading CSV File from the Internet and importin it to R\nsetwd(\"C:/Users/ESPIJ090.WDW/datasciencecoursera - data/\")\n#Creates a folder for data if there isn't one already.\nif (!file.exists(\"data\")) {\n dir.create(\"data\")\n}\n\nfileUrl <- \"https://data.baltimorecity.gov/api/views/dz54-2aru/rows.csv?accessType=DOWNLOAD\"\ndownload.file(fileUrl, destfile = \"./data/cameras.csv\")\ndateDownloaded <- date()\n\ncameraData <- read.csv(\"./data/cameras.csv\")\n\n\n###############################################################\n#Downloading XLS File from the Internet and importin it to R\nif(!file.exists(\"data\")){dir.create(\"data\")}\nfileUrl <- \"https://data.baltimorecity.gov/api/views/dz54-2aru/rows.xlsx?accessType=DOWNLOAD\"\ndownload.file(fileUrl,destfile=\"./data/cameras.xlsx\")\ndateDownloaded <- date()\n\nlibrary(xlsx)\ncameraData <- read.xlsx(\"./data/cameras.xlsx\",sheetIndex=1,header=TRUE)\nhead(cameraData)\n\n###############################################################\n#Downloading XML and importin it to R\n\nlibrary(XML)\nfileUrl <- \"http://www.w3schools.com/xml/simple.xml\"\ndoc <- xmlTreeParse(fileUrl,useInternal=TRUE)\nrootNode <- xmlRoot(doc)\nxmlName(rootNode)\n\n#Exploring XLM\nnames(rootNode)\nrootNode[[1]]\nrootNode[[1]][[1]]\n\n#Extracting menu items and prices.\nxpathSApply(rootNode,\"//name\",xmlValue)\nxpathSApply(rootNode,\"//price\",xmlValue)\nxpathSApply(rootNode,\"//calories\",xmlValue)\n\n#Programmatically extra XML data\nxmlSApply(rootNode,xmlValue)\n\n#Extracting menu items and prices.\nxpathSApply(rootNode,\"//name\",xmlValue)\nxpathSApply(rootNode,\"//price\",xmlValue)\nxpathSApply(rootNode,\"//calories\",xmlValue)\n\n#ESPN Example\n\n\nfileUrl <- \"http://espn.go.com/nfl/team/_/name/bal/baltimore-ravens\"\ndoc <- htmlTreeParse(fileUrl,useInternal=TRUE)\n\nscores <- xpathSApply(doc,\"//li[@class='score']\",xmlValue)\nteams <- xpathSApply(doc,\"//li[@class='team-name']\",xmlValue)\nstatus <- xpathSApply(doc,\"//li[@class='game-status']\",xmlValue)\n\nscores\nteams\nstatus\n\n###############################################################\n#Downloading JSON Files\nlibrary(jsonlite)\njsonData <- fromJSON(\"https://api.github.com/users/jtleek/repos\")\nnames(jsonData)\n\n#Exploring the JSON file\njsonData$name\nnames(jsonData$owner)\njsonData$owner$login\n\n#Writing data frames to JSON\nmyjson <- toJSON(iris, pretty=TRUE)\ncat(myjson)\n\n#Writing from JSON to data frame\niris2 <- fromJSON(myjson)\nhead(iris2)\n\n###############################################################\n#Data Table Operations\n\nlibrary(data.table)\nDT = data.table(x=rnorm(9),y=rep(c(\"a\",\"b\",\"c\"),each=3),z=rnorm(9))\nhead(DT,3)\n\n#Subseting Rows\nDT[2,]\nDT[c(2,3)]\nDT[DT$y==\"a\",]\n\n#Summarizing Variables\nDT[,list(mean(x),sum(z),sum(x))]\nDT[,table(y)]\n\n\n#Adding new columns\nDT[,w:=z^2]\nDT2 <- DT\nDT[, y:= 2]\n\nDT[,a:=x>0]\nDT[,b:= mean(x+w),by=a]\n\n\n#Counting the elements of a factor variable, built in special variable\nset.seed(123);\nDT <- data.table(x=sample(letters[1:3], 1E5, TRUE))\nDT[, .N, by=x]\n\n#Creating keys for subsetting\nDT <- data.table(x=rep(c(\"a\",\"b\",\"c\"),each=100), y=rnorm(300))\nsetkey(DT, x)\nDT['a']\n\n#Creating keys for subsetting.\nDT1 <- data.table(x=c('a', 'a', 'b', 'dt1'), y=1:4)\nDT2 <- data.table(x=c('a', 'b', 'dt2'), z=5:7)\nsetkey(DT1, x); 
setkey(DT2, x)\nmerge(DT1, DT2)\n\n#Fast reading of large files.\nbig_df <- data.frame(x=rnorm(1E6), y=rnorm(1E6))\nfile <- tempfile()\n\nwrite.table(big_df, file=file, row.names=FALSE, col.names=TRUE, sep=\"\\t\", quote=FALSE)\nsystem.time(fread(file))\n\n\n####################\n#MySQL\nlibrary(RMySQL)\n\n#Show number of databases.\nucscDb <- dbConnect(MySQL(),user=\"genome\",host=\"genome-mysql.cse.ucsc.edu\")\nresult <- dbGetQuery(ucscDb,\"show databases;\")\ndbDisconnect(ucscDb)\n\n#Show tables in a particular database.\nhg19 <- dbConnect(MySQL(),user=\"genome\", db=\"hg19\",host=\"genome-mysql.cse.ucsc.edu\")\nallTables <- dbListTables(hg19)\nlength(allTables)\nallTables[1:5]\n\n#Get the dimensions of a particular table.\ndbListFields(hg19,\"affyU133Plus2\")#Counts variables.\ndbGetQuery(hg19, \"select count(*) from affyU133Plus2\")#Counts observations.\n\n#Read a table into R.\naffyData <- dbReadTable(hg19, \"affyU133Plus2\")\nhead(affyData)\n\n#Select only a specific subset.\n#selecting based on values in a variable.\nquery <- dbSendQuery(hg19, \"select * from affyU133Plus2 where misMatches between 1 and 3\")\naffyMis <- fetch(query)\nquantile(affyMis$misMatches)\n\n#Selecting only the top 10 observations.\naffyMisSmall <- fetch(query,n=10); dbClearResult(query);\n\n#Again, must close the connection.\ndbDisconnect(hg19)\n\n####################\n#HDF5\n\nsource(\"http://bioconductor.org/biocLite.R\")\nbiocLite(\"rhdf5\")\n\n#Creates interface with hdf5 databases\nlibrary(rhdf5)\ncreated = h5createFile(\"example.h5\")\ncreated\n\n#Create Groups\ncreated = h5createGroup(\"example.h5\",\"foo\")\ncreated = h5createGroup(\"example.h5\",\"baa\")\ncreated = h5createGroup(\"example.h5\",\"foo/foobaa\")\nh5ls(\"example.h5\")\n\n#Write data to groups\nA = matrix(1:10,nr=5,nc=2)\nh5write(A, \"example.h5\",\"foo/A\")\nB = array(seq(0.1,2.0,by=0.1),dim=c(5,2,2))\nattr(B, \"scale\") <- \"liter\"\nh5write(B, \"example.h5\",\"foo/foobaa/B\")\nh5ls(\"example.h5\")\n\n#Write a dataset.\ndf = data.frame(1L:5L,seq(0,1,length.out=5),\n c(\"ab\",\"cde\",\"fghi\",\"a\",\"s\"), stringsAsFactors=FALSE)\nh5write(df, \"example.h5\",\"df\")\nh5ls(\"example.h5\")\n\n#Reading data\nreadA = h5read(\"example.h5\",\"foo/A\")\nreadB = h5read(\"example.h5\",\"foo/foobaa/B\")\nreaddf= h5read(\"example.h5\",\"df\")\nreadA\n\n#Writing and reading chunks.\nh5write(c(12,13,14),\"example.h5\",\"foo/A\",index=list(1:3,1))\nh5read(\"example.h5\",\"foo/A\")\n\n\n####################\n#Web Data\n\n#Reading HTML Code\ncon = url(\"http://scholar.google.com/citations?user=HI-I6C0AAAAJ&hl=en\")\nhtmlCode = readLines(con)\nclose(con)\nhtmlCode\n\n#Parsing HTML with XML package.\nlibrary(XML)\nurl <- \"http://scholar.google.com/citations?user=HI-I6C0AAAAJ&hl=en\"\nhtml <- htmlTreeParse(url, useInternalNodes=T)\n\nxpathSApply(html, \"//title\", xmlValue)\n\nxpathSApply(html, \"//td[@id='col-citedby']\", xmlValue)\n\n#Same Example as above but with httr package.\nlibrary(httr); html2 = GET(url)\ncontent2 = content(html2,as=\"text\")\nparsedHtml = htmlParse(content2,asText=TRUE)\nxpathSApply(parsedHtml, \"//title\", xmlValue)\n\n#Accessing webistes with passwords - try this with jacksons SQL database!\npg2 = GET(\"http://httpbin.org/basic-auth/user/passwd\",authenticate(\"user\",\"passwd\"))\npg2\n\nnames(pg2)\n\n#Using Handles\ngoogle = handle(\"http://google.com\")\npg1 = GET(handle=google,path=\"/\")\npg2 = GET(handle=google,path=\"search\")\n\n\n####################\n#APIs\n\n#Getting data form Twitter\nmyapp = 
oauth_app(\"twitter\",\n key=\"yourConsumerKeyHere\",secret=\"yourConsumerSecretHere\")\nsig = sign_oauth1.0(myapp,\n token = \"yourTokenHere\",\n token_secret = \"yourTokenSecretHere\")\nhomeTL = GET(\"https://api.twitter.com/1.1/statuses/home_timeline.json\", sig)\n\n#Converting to JSON\njson1 = content(homeTL)\njson2 = jsonlite::fromJSON(toJSON(json1))\njson2[1,1:4]\n\n\n\n" }, { "alpha_fraction": 0.3218598961830139, "alphanum_fraction": 0.3466183543205261, "avg_line_length": 39.414634704589844, "blob_id": "33a28f54ba8e7a78d587b32ea3482ecf3b7e579c", "content_id": "872e54510b5d9687d279dbd5e8f7545cd28aa4b8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 1656, "license_type": "no_license", "max_line_length": 130, "num_lines": 41, "path": "/5. Data Products/0. NBC Early HQ App/ui.R", "repo_name": "espin086/DataScienceToolKitInR", "src_encoding": "UTF-8", "text": "library(shiny)\n\n\nshinyUI(pageWithSidebar(\n headerPanel(\"Predictive Model\"),\n sidebarPanel(\n numericInput('DaystoHQ', 'Days to a High Quality Piracy Release', 0, min = 0, max = 100, step = 1),\n \n sliderInput('BO', \"What is the total expected Box Office?\", value = 10, min = 0, max = 500000000, step= 10000000),\n \n checkboxGroupInput('Genre', 'Genre of the Film', \n c(\"Animation\" = \"1\",\n \"Adventure\" = \"2\",\n \"Comedy\" = \"3\",\n \"Drama\" = \"4\",\n \"Romantic Comedy\" = \"5\",\n \"Horror/Thriller\" = \"6\")),\n \n checkboxGroupInput('Rating', 'Rating of Film', \n c(\"G\" = \"G\",\n \"PG\" = \"PG\",\n \"PG-13\" = \"PG-14\",\n \"R\" = \"R\"\n )),\n \n submitButton(\"Run Model\")\n ),\n mainPanel(\n h3(\"Given the set of imputs...\"),\n h5(\"Days to HQ:\"),\n verbatimTextOutput(\"DaystoHQ\"),\n h5(\"Film Genre:\"),\n verbatimTextOutput(\"Genre\"),\n \n h3(\"...a predictive model was created...\"),\n \n h3(\"...here is the model prediction\"),\n \n verbatimTextOutput(\"Prediction\")\n )\n))" }, { "alpha_fraction": 0.4115982949733734, "alphanum_fraction": 0.4163130521774292, "avg_line_length": 41.41999816894531, "blob_id": "71352c08ffb018de39e0ade8926ad25f48ba6632", "content_id": "71296ff2e53f4220d99716ab8706b7804a2406f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 2121, "license_type": "no_license", "max_line_length": 89, "num_lines": 50, "path": "/2. 
Regression Models/diagnostic - ols.R", "repo_name": "espin086/DataScienceToolKitInR", "src_encoding": "UTF-8", "text": "library(car)\nlibrary(datasets)\nfit <- lm(Fertility ~ ., data=swiss)\n\n############################################\n#function for ols diagnostics\nols.diagnostics <- function(ols.model){\n \n #############################################\n #The basic tool for diagnosing regression models are residual plots\n plot(ols.model, which = 1:6)\n \n #############################################\n #Deletion diagnostics is another tool to examine influential observations\n print(\"#############################################\")\n print(summary(influence.measures(ols.model)))\n \n print(\"#############################################\")\n print(\"TYPE:Testing for heteroskedasticity:BP test\")\n print(\"INTERPRETATION: if p-value less than 0.05 then heteroskedasticity exists\")\n print(\"#############################################\")\n library(lmtest)\n print(bptest(ols.model))\n \n print(\"#############################################\")\n print(\"TYPE: Testing for functional misspecification; RESET TEST\") \n print(\"INTERPRETATION: if p-value > 0.05 then no mispecificaiton\")\n print(\"#############################################\")\n print(resettest(ols.model))\n \n print(\"#############################################\")\n print(\"TYPE: Multicollinearity Test; VIF\") \n print(\"INTERPRETATION: if greater than 10 variable is multicollinear\") \n print(\"#############################################\")\n print(vif(fit))\n \n print(\"#############################################\")\n print(\"TYPE: Non-Linearity Test; Added Variable Plots\") \n print(\"INTERPRETATION: conditional regression plots for non-linearity\") \n print(\"#############################################\")\n avPlots(fit)\n \n print(\"#############################################\")\n print(\"REGRESSION MODEL\") \n print(\"#############################################\")\n summary(fit)\n}\n\n#Calling the function\nols.diagnostics(fit)\n" }, { "alpha_fraction": 0.6732085943222046, "alphanum_fraction": 0.6768598556518555, "avg_line_length": 28.945205688476562, "blob_id": "6a7fab2c35ce43bcae478f8538bdd00f0d503375", "content_id": "4eea6cffe447f86e5c94da12ad95020f1209ac6e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2191, "license_type": "no_license", "max_line_length": 98, "num_lines": 73, "path": "/0_software_engineering/example_logging.py", "repo_name": "espin086/DataScienceToolKitInR", "src_encoding": "UTF-8", "text": "import logging\n\n##############################\n#Getting Started with Logging\n##############################\n\n#this sets the type of logging that is done\n#Options include: \n#INFO\n#DEBUG\n#\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n#######\n#logging to a file\nhandler = logging.FileHandler('hello.log')\nhandler.setLevel(logging.INFO)\n#create a logging format to save into a file\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nhandler.setFormatter(formatter)\n#add formatting to log\nlogger.addHandler(handler)\n#begin logging\n\n\n##############################\n#Writing Log Recorrds at Proper Levels\n##############################\n\n########\n#use DEBUG logs inside of complex loops to find issues later\nmylist = [1,2,3,4]\ndef complex_algorithm(items):\n\tfor i, item in enumerate(items):\n\t\tprint(mylist[i])\n\t\tlogger.debug('{} iteration, item 
={}'.format(i,item))\n# complex_algorithm(mylist)\n########\n#Use INFO logs for handling requests or checking server states\ndef handle_requests(request):\n\tlogger.info('Handling requests {}'.format(request))\n\tresult = 'result'\n\tlogger.info('Return result {}'.format(result))\n\ndef start_server(port):\n\tlogger.info('Starting service at port {}'.format(port))\n\tlogger.info('Service is started')\n\n# handle_requests(request='myrequest')\n# start_server(port='4081')\n########\n#Use WARNING logs when something important happens, but not an error (e.g. incorrect password entered)\ndef authenticate(user_name, password, ip_address):\n\tif user_name != 'USER_NAME' and password != 'PASSWORD':\n\t\tlogger.warning('Login attempt to {} from IP {}'.format(user_name, ip_address))\n\t\treturn False\n\n# authenticate('jj', 'test', '1234567')\n#Use ERROR logs when something is wrong, for example an exception is thrown\ndef get_user_by_id(user_id):\n\tuser = input('id: ')\n\tif user is None:\n\t\tlogger.error('Cannot find user with user_id={}'.format(user_id))\n\t\treturn 'No user'\n\treturn 'example user'\n# get_user_by_id(None)\n#catching exceptions with logging\ntry:\n    open('/path/to/does/not/exist', 'rb')\nexcept (SystemExit, KeyboardInterrupt):\n    raise\nexcept Exception:\n    logger.error('Failed to open file', exc_info=True)\n\n\n\n\n\n" }, { "alpha_fraction": 0.7202430963516235, "alphanum_fraction": 0.7271584272384644, "avg_line_length": 28.085365295410156, "blob_id": "f271c0681b9503c236ce7026bcb553fd54f181b6", "content_id": "ed87a2f3453f6f298f6ced1964449a548e14ac76", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "RMarkdown", "length_bytes": 4772, "license_type": "no_license", "max_line_length": 190, "num_lines": 164, "path": "/5. Data Products/1. R Markdown Document.Rmd", "repo_name": "espin086/DataScienceToolKitInR", "src_encoding": "UTF-8", "text": "---\ntitle: \"Exploratory Analysis - Kitchen Stoves\"\nauthor: \"JJ Espinoza\"\ndate: \"Sunday, August 23, 2015\"\noutput: word_document\n---\n\nThe following exploratory analysis examines data on product characteristics of kitchen stoves. 
\n\n\n**Stove Requirements**\n\nStainless steel\n\n\nDimensions of the stove: Depth = 25in, Width = 30in, Height 45in\n\n**Key Findings (statistically significant):**\n\n- The average price of a stainless steel stove with gas burners is $1,500\n\n- The dimensions of the stove we need are common\n\n- There is a strong correlation between price and weight; likely due to better materials\n\n- The capacity of the stove is also a key driver, specifically the capacity of the oven\n\n- The effectiveness of the stove is important but mainly driven by the heating capacity of the oven\n\n\n##Importing the data.\n\n```{r echo=TRUE}\ndata <- read.csv(\"C:/Users/ESPIJ090.WDW/Home - Kitchen Stoves/data/Kitchen Stove.csv\")\n```\n\n\n\n###We want to limit our search to stainless steel stoves only.\n\n```{r echo=TRUE}\nstainless <- data[which(data$Color==\"Stainless steel\" & data$CookingSurface==\"Gas: sealed burners\"),]\n```\n\n##Data Visualization and Analysis\n\n###What should we expect to pay for a stove?\n\n```{r echo=TRUE}\nhist(stainless$Price, col = \"red\", main=\"Stainless Stove Prices\", xlab = \"Price($)\")\n\nsummary(stainless$Price)\n```\n\n###What are the common dimensions of stoves in relation to what we are looking for?\n\nWe are looking for a stove that is no more than 30in.\n\n```{r echo=TRUE}\nhist(stainless$Width, col = \"blue\", main=\"Width of Stainless Stoves \", xlab = \"Width (in)\")\nsummary(stainless$Width)\n```\n\nWe are looking for a stove that is close to 45 in height.\n```{r echo=TRUE}\nhist(stainless$Height, col = \"blue\", main=\"Height of Stainless Stoves \", xlab = \"Height (in)\")\nsummary(stainless$Height)\n```\n\nWe are looking for a stove that is close to 25 in depth.\n```{r echo=TRUE}\nhist(stainless$Depth, col = \"blue\", main=\"Depth of Stainless Stoves \", xlab = \"Depth (in)\")\nsummary(stainless$Depth)\n```\n\n###What are the tradeoffs between stove dimensions and price? 
\n\nThe correlation matrix shows a strong correlation between price and weight.\n```{r echo=TRUE}\nlibrary(car)\nscatterplot.matrix(~Price+Depth+Height+Width+Weight , data=stainless, main=\"Price Correlated with Dimensions of Stove\")\n```\n\nTaking a closer look one can see the clear relationship.\n\n```{r echo=TRUE}\nplot(y = stainless$Price, x = stainless$Weight, pch = 19, col = \"red\",main = \"Correlation between Weight and Price\", xlab=\"Weight (lbs)\", ylab = \"Price ($)\")\n\nabline(lm(stainless$Price~stainless$Weight), col=\"red\") # regression line (y~x) \n```\n\nRegression estimate of relationship between price and weight.\n```{r echo=TRUE}\nsummary(lm(Price~Weight, data = stainless))\n```\n\n###What other features are correlated with price?\n\n```{r echo=TRUE}\nscatterplot.matrix(~Price+ MainOvenCapacityCuFt+ NumberofElementsBurners , data=stainless, main=\"Price Correlated with Dimensions of Stove - Plot1\")\n```\n\n\n```{r echo=TRUE}\nscatterplot.matrix(~Price + MainOvenNumberofRackPositions + NumberofOvenRacks , data=stainless, main=\"Price Correlated with Dimensions of Stove - Plot2\")\n```\n\nTaking a closer look at the correlations between Number of Oven Racks and Price we see a negative relationship.\n\n```{r echo=TRUE}\nplot(y = stainless$Price, x = stainless$NumberofOvenRacks, pch = 19, col = \"red\",main = \"Correlation between Number of Oven Racks and Price\", xlab=\"Number of Oven Racks\", ylab = \"Price ($)\")\n\nabline(lm(stainless$Price~stainless$NumberofOvenRacks), col=\"red\") # regression line (y~x) \n```\n\nRegression estimate of relationship between price and number of oven racks.\n```{r echo=TRUE}\nsummary(lm(Price~NumberofOvenRacks, data = stainless))\n```\n\n###How does the effectiveness and efficiency correlate with price?\n\nThe scatterplot below shows that the higher the BTUs the lower the price, which seems counterintuitive.\n\n```{r echo=TRUE}\nscatterplot.matrix(~Price + BroilerBurnerBTUs + MainOvenBakeBurnerBTUs , data=stainless, main=\"Price Correlated with Effectiveness of Oven\")\n```\n\n\n\n##Regression Analysis\n```{r echo=TRUE}\nmodel <- lm(Price ~ Weight + MainOvenCapacityCuFt + NumberofOvenRacks + BroilerBurnerBTUs + MainOvenBakeBurnerBTUs , data=stainless )\n```\n\n```{r echo=TRUE}\nsummary(model)\n```\n\n```{r echo=TRUE}\nplot(model)\n```\n\n###How well does the regression model fit the data?\n\n```{r echo=TRUE}\nplot(x = predict(model, stainless), y = stainless$Price, main = \"Model Predictions vs. Actual Prices\", xlab = \"Predicted Price\", ylab = \"Actual Price\")\nabline(0, 1)\n```\n\n##The model accuracy is: \n\n```{r echo=TRUE}\nmean(abs((stainless$Price - predict(model, stainless))/stainless$Price), na.rm=TRUE)\n\n```\n\n###Appendix - Summary Statistics\n\nSummary statistics\n\n```{r echo=TRUE}\nsummary(stainless)\n```\n\n\n" }, { "alpha_fraction": 0.6457898616790771, "alphanum_fraction": 0.6840640306472778, "avg_line_length": 23.145160675048828, "blob_id": "34a3c796d8efdf314f9087968501227e88df9ca3", "content_id": "a219ee1a211905f5d380fc77a166b3c6e8ae418d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 1437, "license_type": "no_license", "max_line_length": 56, "num_lines": 62, "path": "/0. Getting and Cleaning Data/1. 
Box Office Mojo Scrapers/Scaper-BOM-Film Metadata.R", "repo_name": "espin086/DataScienceToolKitInR", "src_encoding": "UTF-8", "text": "######################################\n#Collecting film meta-data - Box Office Mojo\n\n\nlibrary(XML) #Used to parse out film data\n\nu <- \"http://www.boxofficemojo.com/movies/?id=2guns.htm\"\n\n#Contains a host of data\ntest.1 <- readHTMLTable(u)[[1]]\n\n#Contains a host of data\n#test.2 <- readHTMLTable(u)[[2]]#Empty\n\n#Contains a host of data\ntest.3 <- readHTMLTable(u)[[3]]\n\n#Contains a small table with key data\ntest.4 <- readHTMLTable(u)[[4]]\n\n\n#Contains a small table with key data\ntest.5 <- readHTMLTable(u)[[5]]\n\n#Contains a small table with key data\ntest.6 <- readHTMLTable(u)[[6]]\n\n#Contains a large table with key data\ntest.7 <- readHTMLTable(u)[[7]]\n\n#Contains a large table with key data\ntest.8 <- readHTMLTable(u)[[8]]\n\n#Contains a large table with key data\ntest.9 <- readHTMLTable(u)[[9]]\n\n#Contains a large table with key data\ntest.10 <- readHTMLTable(u)[[10]]\n\n#Contains a large table with key data\ntest.11 <- readHTMLTable(u)[[11]]\n\n#Contains a large table with key data\n#test.12 <- readHTMLTable(u)[[12]]#Empty\n\n#Contains a large table with key data\n#test.13 <- readHTMLTable(u)[[13]]#Empty\n\n#Contains a large table with key data\n#test.14 <- readHTMLTable(u)[[14]]#Empty\n\n#Table that contains the talent in the film\ntest.15 <- readHTMLTable(u)[[15]]\n\n#Ranking of the film\ntest.16 <- readHTMLTable(u)[[16]]\n\n#Further ranking of film\ntest.17 <- readHTMLTable(u)[[17]]\n\n#Further ranking of film\ntest.18 <- readHTMLTable(u)[[18]]\n\n\n" }, { "alpha_fraction": 0.5784916877746582, "alphanum_fraction": 0.5989974737167358, "avg_line_length": 27.128204345703125, "blob_id": "b7e2f7b1baa34eb8e4808dc28bcaedffd1570159", "content_id": "762c58a9d565269397b944dd52f0427bedeebfe3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 4389, "license_type": "no_license", "max_line_length": 75, "num_lines": 156, "path": "/3. Machine Learning Models/1. Text Classification/2.Machine Learning Models.R", "repo_name": "espin086/DataScienceToolKitInR", "src_encoding": "UTF-8", "text": "library(caret) #used to train models\nlibrary(caret)#machine learning modeling\nlibrary(doMC)\nregisterDoMC(cores = 4)\n\nset.seed(2015)\n\n#reading in training and test set\nsetwd(\"~/Documents/Text Classification - Resumes/1. 
Data\")\n\ndf <- read.csv(\"TDM.csv\")\n\ninBuild <- createDataPartition(y = df$target,p=0.7, list=FALSE)\ntest <- df[-inBuild,]\ntrain <- df[inBuild,]\n\n\n\n\n#######################################\n#Training Methodology\n\nfitControl <- trainControl(## 10-fold CV\n method = \"repeatedcv\",\n number = 10,\n ## repeated ten times\n repeats = 10)\n\n\n\n#######################################\n#Level 1 Models - on training data\n\nmodel.1 <- train(target ~ ., \n method=\"xgbTree\",\n trControl = fitControl, \n data = train)\n\n\nmodel.2 <- train(target ~ ., \n method=\"rf\", \n trControl = fitControl,\n data = train)\n\nmodel.3 <- train(target ~ ., \n method=\"knn\", \n trControl = fitControl,\n data = train)\n\nmodel.4 <- train(target ~ ., \n method=\"svmLinear\", \n trControl = fitControl,\n data = train)\n\n#Creating new data based for ensemble models\ntrain$model.1 <- predict(model.1, train)\ntrain$model.2 <- predict(model.2, train)\ntrain$model.3 <- predict(model.3, train)\ntrain$model.4 <- predict(model.4, train)\n\n\n\n\n#######################################\n#Level 1 - Ensemble Models - for greater accuracy\n\n#Training ensemble models on the training set with other model's predictors\nensemble.1 <- train(target ~ ., \n method=\"xgbTree\", \n trControl = fitControl,\n data = train)\n\nensemble.2 <- train(target ~ ., \n method=\"rf\", \n trControl = fitControl,\n data = train)\n\nensemble.3 <- train(target ~ ., \n method=\"knn\", \n trControl = fitControl,\n data = train)\n\nensemble.4 <- train(target ~ ., \n method=\"svmLinear\", \n trControl = fitControl,\n data = train)\n\ntrain$ensemble.1 <- predict(ensemble.1, train)\ntrain$ensemble.2<- predict(ensemble.2, train)\ntrain$ensemble.3 <- predict(ensemble.3, train)\ntrain$ensemble.4 <- predict(ensemble.4, train)\n\n\n\n\n#######################################\n#Level 2 - Ensemble Models\n\nfinal.ensemble <- train(target ~ ., \n method=\"xgbTree\",\n trControl = fitControl,\n data = train)\nfinal.ensemble\n\n\n#######################################\n#Accessing Model Accuracy on test data\ntest$model.1 <- predict(model.1, test)\ntest$model.2 <- predict(model.2, test)\ntest$model.3 <- predict(model.3, test)\ntest$model.4 <- predict(model.4, test)\ntest$ensemble.1 <- predict(ensemble.1, test)\ntest$ensemble.2 <- predict(ensemble.2, test)\ntest$ensemble.3 <- predict(ensemble.3, test)\ntest$ensemble.4 <- predict(ensemble.4, test)\ntest$final.ensemble <- predict(final.ensemble, test)\n\n\n\n#Out of Sample Error: 1) Predict on test set and examine confusion matrix\nconfusionMatrix(test$target, test$model.1)\nconfusionMatrix(test$target, test$model.2)\nconfusionMatrix(test$target, test$model.3)\nconfusionMatrix(test$target, test$model.4)\n\nconfusionMatrix(test$target, test$ensemble.1)\nconfusionMatrix(test$target, test$ensemble.2)\nconfusionMatrix(test$target, test$ensemble.3)\nconfusionMatrix(test$target, test$ensemble.4)\n\nconfusionMatrix(test$target, test$final.ensemble)\n\n#######################################\n#Accessing Model Accuracy on complete dataset\ndf$model.1 <- predict(model.1, df)\ndf$model.2 <- predict(model.2, df)\ndf$model.3 <- predict(model.3, df)\ndf$model.4 <- predict(model.4, df)\ndf$ensemble.1 <- predict(ensemble.1, df)\ndf$ensemble.2 <- predict(ensemble.2, df)\ndf$ensemble.3 <- predict(ensemble.3, df)\ndf$ensemble.4 <- predict(ensemble.4, df)\ndf$final.ensemble <- predict(final.ensemble, df)\n\n\nconfusionMatrix(df$target, df$model.1)\nconfusionMatrix(df$target, df$model.2)\nconfusionMatrix(df$target, 
df$model.3)\nconfusionMatrix(df$target, df$model.4)\n\nconfusionMatrix(df$target, df$ensemble.1)\nconfusionMatrix(df$target, df$ensemble.2)\nconfusionMatrix(df$target, df$ensemble.3)\nconfusionMatrix(df$target, df$ensemble.4)\n\nconfusionMatrix(df$target, df$final.ensemble)\n\n" }, { "alpha_fraction": 0.6599956154823303, "alphanum_fraction": 0.6707719564437866, "avg_line_length": 41.867923736572266, "blob_id": "385a5ac5f5047e6ac3222a6dd4f25e8d12aadbd9", "content_id": "12bf6a6350135b1b9f0eec16f59c61c004ccac00", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 4551, "license_type": "no_license", "max_line_length": 201, "num_lines": 106, "path": "/2. Regression Models/0. Cobb-Douglas Production Models/0. Introduction to Production Economics.R", "repo_name": "espin086/DataScienceToolKitInR", "src_encoding": "UTF-8", "text": "library(reshape) #Used to rename variables\nlibrary(\"micEcon\")#Used to calculate price indexes\n\n###################################\n#Importing original dataset\ndata(\"appleProdFr86\", package = \"micEcon\")\n\n#Produces help menu for original datase on French Apple producers\nhelp(\"appleProdFr86\", package = \"micEcon\")\n\n\n###################################\n#Data Dictionary\n\n#vCap costs of capital (including land)\n#vLab costs of labor (including remuneration of unpaid family labor)\n#vMat costs of intermediate materials (e.g. seedlings, fertilizer, pesticides, fuel) \n#qOut quantity index of all outputs (apples and other outputs)\n#pCap price index of capital goods\n#pLab price index of labor\n#pMat price index of materials\n#pOut price index of the aggregate outputú\n#adv use of advisory serviceú\n\n###################################\n#Renaming data for entertainment industry example\n\nappleProdFr86 <- rename(appleProdFr86, c(vCap = \"vProps\")) #props/equipment in a movie can be considered capital expenses\n\nappleProdFr86 <- rename(appleProdFr86, c(vLab = \"vDirector\")) #directors/producers can be though of as the labor going into the film\n\nappleProdFr86 <- rename(appleProdFr86, c(vMat = \"vActors\")) #actors/actress can be thought of as the raw materials in a film that are modeled by the director and props/equipment\n\nappleProdFr86 <- rename(appleProdFr86, c(qApples = \"qAdmission\")) #number of tickets sold for a movie may be considered the 'main' output of a film product\n\nappleProdFr86 <- rename(appleProdFr86, c(qOtherOut = \"qHomeEnt\")) #number of home entertainment sales/rentals may be considered other output from a film\n\nappleProdFr86 <- rename(appleProdFr86, c(qOut = \"qTotalView\")) #Box office and rentals can be considered the total viewership of a film\n\nappleProdFr86 <- rename(appleProdFr86, c(pCap = \"pProps\")) #price of capital may be considered the rental rate of props (hr?)\n\nappleProdFr86 <- rename(appleProdFr86, c(pLab = \"pDirector\")) #price of labor may be the contractual wage of a director per unit of labor (hr?)\n\nappleProdFr86 <- rename(appleProdFr86, c(pMat = \"pActors\")) #price of materials may be the contractual wage of an actor per unit of work (hr?)\n\nappleProdFr86 <- rename(appleProdFr86, c(pOut = \"pTotalView\")) #price of total output may be the weighted price of admission and home entertainment \n\n#Abbreviating the name of the dataset\ndat <-appleProdFr86\n\n###################################\n#Calculating input quantities by dividing total costs on input by input price\ndat$qProps <- dat$vProps / dat$pProps\ndat$qDirector <- dat$vDirector / 
dat$pDirector\ndat$qActors <- dat$vActors / dat$pActors\n\n###################################\n#Deriving cost measures\n\n#Total costs\ndat$cost <- with( dat, vProps + vDirector + vActors )\n\n#Variable costs\ndat$vCost <- with( dat, vDirector + vActors )\n\n\n###################################\n#Deriving profit and gross margins\ndat$profit <- with(dat, pTotalView * qTotalView - cost)\n\n#If capital is a quasi-fixed input and labor and materials are variable inputs, the movie producers’ gross margins can be calculated by the following command:\n\ndat$vProfit <- with( dat, pTotalView * qTotalView - vDirector - vActors )\n\n\n###################################\n#Paasche, Laspeyres, and Fisher quantity indices of all three inputs manually\n\n#Paasche quantity index (one term per input: props, director, actors)\ndat$XP <- with(dat,(vProps + vDirector + vActors)/ (mean(qProps) * pProps + mean(qDirector) * pDirector + mean(qActors) * pActors)) \n \n#Laspeyres quantity index \ndat$XL <- with(dat, (qProps *mean(pProps) + qDirector * mean(pDirector) + qActors * mean(pActors)) / (mean(qProps) * mean(pProps) + mean(qDirector) * mean(pDirector) + mean( qActors ) * mean(pActors)))\n\n#Fisher quantity index \ndat$X <- sqrt( dat$XP * dat$XL )\n\n#visualizing similarity in price indexes\nplot( dat$XP, dat$XL )\nplot( dat$XP, dat$XL, log = \"xy\" )\n\n\n###################################\n#Paasche, Laspeyres, and Fisher quantity indices of all three inputs using the micEcon package\n\ndat$XP2 <- quantityIndex( c(\"pProps\", \"pDirector\", \"pActors\" ),\n c(\"qProps\", \"qDirector\", \"qActors\" ), \n data = dat, method = \"Paasche\" )\n\ndat$XL2 <- quantityIndex( c(\"pProps\", \"pDirector\", \"pActors\" ),\n c(\"qProps\", \"qDirector\", \"qActors\" ), \n data = dat, method = \"Laspeyres\" )\n\ndat$X2 <- quantityIndex( c(\"pProps\", \"pDirector\", \"pActors\" ),\n c(\"qProps\", \"qDirector\", \"qActors\" ), \n data = dat, method = \"Fisher\" )\n\n\n\n" }, { "alpha_fraction": 0.6580102443695068, "alphanum_fraction": 0.6653255224227905, "avg_line_length": 44.54999923706055, "blob_id": "dcca53e5e05613a2625197c74dc666db1235fad4", "content_id": "8953fa57d028f39825039e3da751ac8dda0b0db5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 2734, "license_type": "no_license", "max_line_length": 98, "num_lines": 60, "path": "/0. Getting and Cleaning Data/1. 
Box Office Mojo Scrapers/Scaper-BOM-Release Dates.R", "repo_name": "espin086/DataScienceToolKitInR", "src_encoding": "UTF-8", "text": "######################################\n#Collecting film meta-data - Box Office Mojo\nlibrary(XML) #Used to scrape film data\nlibrary(lubridate) #Used to format release dates\nlibrary(rattle) #Used to produce summary statistics after data collection and cleaning\n\n#Source of Data: http://www.boxofficemojo.com/schedule/?view=distributor&p=.htm\n\n#List of distributors, you can add additional lists by clicking on the link above\ndist <- as.list(c(\"buenavista\", \"fox\", \"paramount\", \"sony\",\"universal\", \"warnerbros\"))\n#Empty container to hold data per distributor\ndist.data <- list()\n\n#Web Scraping Function\nscrape.data <- function(x){\n #produces a URL to pass to an XML libray function based on distributor\n urls <- paste0(\"http://www.boxofficemojo.com/schedule/?view=distributor&id=\", x ,\".htm\")\n #Extrcting relevant table from the web\n input <- readHTMLTable(urls)[[1]]\n #The first few rows contain irrelevant header info, so they are deleted\n input.clean <- input[-c(1,2,3,4),]\n #Creates new variable to identify the distributor in the dataset\n input.clean$dist <- c(x)\n #Remaing the varibles in the dataframe\n names(input.clean) <- c(\"Movie\", \"Release.Date\", \"Distributor\")\n #Removing non-standard or missing dates (is the standard xx/xx/xx)\n clean.data <- input.clean[complete.cases(input.clean),]\n clean.data <- clean.data[ which(clean.data$Release.Date != \"TBD\"), ]\n clean.data <- clean.data[ which(clean.data$Release.Date != \"2016\"), ]\n clean.data <- clean.data[ which(clean.data$Release.Date != \"2017\"), ]\n clean.data <- clean.data[ which(clean.data$Release.Date != \"2018\"), ]\n #Saving results in a list, so an family of apply functions can loop through a list of dist\n \n clean.data$Release.Date <- as.Date(clean.data$Release.Date, \"%m/%d/%y\")\n \n #Extracting Useful Date Info\n clean.data$Release.Date.Year <- year(clean.data$Release.Date)\n clean.data$Release.Date.Month <- month(clean.data$Release.Date)\n clean.data$Release.Week.Year <- week(clean.data$Release.Date)\n clean.data$Release.Date.Weekday <- wday(clean.data$Release.Date, label = TRUE)\n \n \n \n dist.data[[x]] <- clean.data\n #The function output is this list\n dist.data\n}\n\n#Applying scraping function to distributor list using sapply fucntion\nrelease.list <- sapply(dist, scrape.data)\n\n#Stacking Data\nrelease.df <- do.call(\"rbind\", release.list)\n\n#Exporting Data for Further Analysis\nsetwd(\"~/Documents/HollywoodModels/0. Data/2. Clean Data\")\nwrite.csv(release.df, \"Future Film Release Dates.csv\")\n\n#Opening up Rattle to Explore Further\nrattle()\n\n" }, { "alpha_fraction": 0.6366666555404663, "alphanum_fraction": 0.6966666579246521, "avg_line_length": 36.625, "blob_id": "b05718a4d18b0fe9a93570a0175ab2d60c00e211", "content_id": "eb973d455766486d64354eb71f85bd0789e0b7e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 300, "license_type": "no_license", "max_line_length": 48, "num_lines": 8, "path": "/2. Regression Models/0. Cobb-Douglas Production Models/3. 
Cobb-Douglas Cost Functions - Optimal Cost Shares.R", "repo_name": "espin086/DataScienceToolKitInR", "src_encoding": "UTF-8", "text": "#Must run code that test assumptions first\n\nhist( dat$pProps * dat$qProps / dat$cost ) \nlines(rep(chProps,2),c(0,100),lwd=3 ) \nhist( dat$pDirector * dat$qDirector / dat$cost )\nlines(rep(chDirector,2),c(0,100),lwd=3 )\nhist( dat$pActors * dat$qActors / dat$cost )\nlines(rep(chActors,2),c(0,100),lwd=3 )" }, { "alpha_fraction": 0.7192845940589905, "alphanum_fraction": 0.7274494767189026, "avg_line_length": 44.105262756347656, "blob_id": "d481f7ee9d7475d90b97750452a6514f98411db4", "content_id": "a2034acb322a473172c2d62bc457e9bdf92aaedd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 2572, "license_type": "no_license", "max_line_length": 880, "num_lines": 57, "path": "/0. Getting and Cleaning Data/2. Stock and Economic Data Collector/Clean - All Stock Prices.R", "repo_name": "espin086/DataScienceToolKitInR", "src_encoding": "UTF-8", "text": "#####################################\n#The following program cleans and prepares the data for All Stock Prices.\n\ninfiles <- setwd(\"C:/Users/ESPIJ090.WDW/whalewisdom - data/\")\n\n\n#Importing all files.\nAllStockPrices <- read.csv(paste(infiles,\"/AllStockPrices.csv\",sep=\"\"))\n\n#Deleting columns (stocks) that only have NA values.\nAllStockPrices<-AllStockPrices[,colSums(is.na(AllStockPrices)) != nrow(AllStockPrices)]\n\n#Creating a date variable.\nAllStockPrices$date <- as.Date(as.character(AllStockPrices$X.1))\n\nAllStockPrices <- AllStockPrices[-1]\n\n#Melting and reshaping so data is tidy.\nlibrary(reshape)\nAllStockPrices <- melt(AllStockPrices, id=c(\"date\"))\nnames(AllStockPrices) <- c(\"date\",\"ticker\",\"stock_price\")\n\n#stocks_clean <- AllStockPrices[ which(AllStockPrices$ticker=='MMM' | AllStockPrices$ticker=='AXP'|AllStockPrices$ticker=='T' |AllStockPrices$ticker=='BA'|AllStockPrices$ticker=='CAT'|AllStockPrices$ticker=='CVX'|AllStockPrices$ticker=='CSCO'|AllStockPrices$ticker=='KO'|AllStockPrices$ticker=='DIS'|AllStockPrices$ticker=='DD'|AllStockPrices$ticker=='XOM'|AllStockPrices$ticker=='GE'|AllStockPrices$ticker=='GS'|AllStockPrices$ticker=='HD'|AllStockPrices$ticker=='IBM'|AllStockPrices$ticker=='INTC'|AllStockPrices$ticker=='JNJ'|AllStockPrices$ticker=='JPM'|AllStockPrices$ticker=='MCD'|AllStockPrices$ticker=='MRK'|AllStockPrices$ticker=='MSFT'|AllStockPrices$ticker=='NIKE'|AllStockPrices$ticker=='PFE'|AllStockPrices$ticker=='PG'|AllStockPrices$ticker=='TRV'|AllStockPrices$ticker=='UTX'|AllStockPrices$ticker=='UNH'|AllStockPrices$ticker=='VZ'|AllStockPrices$ticker=='WMT'), ]\n\n#Keep Specific Stock\n#stocks_clean <- AllStockPrices[ which(AllStockPrices$ticker=='DIS'), ]\n\n#All Stocks\nstocks_clean <- AllStockPrices\n\n#Function that lags key variables worth examining\nlibrary(DataCombine)\n\nslide.fun <- function(df = stocks_clean, variable = \"stock_price\" , groupby = \"ticker\", start = -1, end = -10){\n for (i in start:end){\n df <- slide(df, Var = variable, GroupVar = groupby ,slideBy = i)\n }\n}\n\n#Lagging stock price\nslide.fun(variable = \"stock_price\")\n\n\n#Computing returns\nstocks_clean$returns <- log(stocks_clean[,3]) - log(stocks_clean[,4])\n\n#Lagging returns\nslide.fun(variable = \"returns\")\n\n\n#Keeping only daily returns\nmyvars <- c(\"date\", 
\"ticker\",\"returns\",\"returns-1\",\"returns-2\",\"returns-3\",\"returns-4\",\"returns-5\",\"returns-6\",\"returns-7\",\"returns-8\",\"returns-9\",\"returns-10\")\nstocks_clean <- stocks_clean[myvars]\n\n\n#Exporting clean data set.\nwrite.csv(stocks_clean,paste(infiles,\"/AllStockPrices_clean.csv\",sep=\"\"))\n\n" } ]
29
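To make the grouped-lag-and-return pattern from Clean - All Stock Prices.R above concrete, here is a minimal, self-contained R sketch. The tiny two-ticker data frame is invented for illustration (it is not repository data), and the only assumption is that the DataCombine package is installed.

library(DataCombine)

#Hypothetical toy prices -- two tickers, three trading days each (illustrative values only)
prices <- data.frame(
  date = rep(as.Date("2015-01-05") + 0:2, times = 2),
  ticker = rep(c("AAA", "BBB"), each = 3),
  stock_price = c(10, 11, 12, 20, 19, 21)
)

#slide() returns a NEW data frame, so the result must be assigned back
prices <- slide(prices, Var = "stock_price", GroupVar = "ticker", slideBy = -1)

#Daily log return from the current price and its one-day lag (column "stock_price-1")
prices$returns <- log(prices$stock_price) - log(prices$`stock_price-1`)
head(prices)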
saikiran030996/DPS-AI-CHALLENGE
https://github.com/saikiran030996/DPS-AI-CHALLENGE
900edef3cf1414f4de4f72244210b1b73c9f8ddc
a223591cb4d5da241132666e0b581a49a0ecabb1
1dc618131940d69cb01a6cf5f871f95d16562268
refs/heads/main
2023-09-05T16:10:46.192856
2021-11-03T19:00:19
2021-11-03T19:00:19
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.603960394859314, "alphanum_fraction": 0.6196369528770447, "avg_line_length": 25.955554962158203, "blob_id": "4ebecc2b6e141ad5f5978c88ee917c451e0314c2", "content_id": "5c3e7ec60510d22ce945a842df37765ed4e939d9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1212, "license_type": "no_license", "max_line_length": 77, "num_lines": 45, "path": "/app.py", "repo_name": "saikiran030996/DPS-AI-CHALLENGE", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom flask import Flask, request, render_template, jsonify\nimport pickle\n\napp = Flask(__name__)\n\nmodel = pickle.load(open(\"model.pkl\", \"rb\"))\n\n\n@app.route(\"/\", methods=['GET'])\ndef Home():\n return render_template('index.html')\n\n\n@app.route(\"/predict\", methods=[\"POST\"])\ndef predict():\n year = int(request.form.get('year')) - 2021\n month = int(request.form.get('month'))\n features = [[year, month]]\n prediction = int(model.predict(features)[0])\n\n return render_template(\n 'index.html',\n prediction_text='prediction value is {}'.format(prediction))\n\n\n@app.route(\"/api/predict\", methods=[\"POST\"])\ndef apiPredict():\n if \"year\" not in request.get_json() or \"month\" not in request.get_json():\n return {\"Error\": \"Year ans Month are required!\"}, 400\n\n data = request.get_json()\n year = data['year']\n month = data['month']\n\n if type(year) != int or month not in range(1, 13):\n return {\"Error\": \"Year ans Month must be a valid numbers!\"}, 400\n else:\n features = [[year - 2000, month]]\n prediction = int(model.predict(features)[0])\n return {'prediction': prediction}\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)" }, { "alpha_fraction": 0.5471014380455017, "alphanum_fraction": 0.5652173757553101, "avg_line_length": 23, "blob_id": "5a6ad51964d72604e32906ad1fe30f2639366569", "content_id": "8376f4ecbebc1a0dd66404bacaffbe5191ad0cbf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JSON", "length_bytes": 552, "license_type": "no_license", "max_line_length": 70, "num_lines": 23, "path": "/mission-3/package.json", "repo_name": "saikiran030996/DPS-AI-CHALLENGE", "src_encoding": "UTF-8", "text": "{\n \"name\": \"dps-ai-challenge\",\n \"version\": \"1.0.0\",\n \"description\": \"submit task \",\n \"main\": \"index.js\",\n \"scripts\": {\n \"start\": \"node submit.js\"\n },\n \"repository\": {\n \"type\": \"git\",\n \"url\": \"git+https://github.com/samirazazy/DPS-AI-CHALLENGE.git\"\n },\n \"author\": \"samir azazy\",\n \"license\": \"ISC\",\n \"bugs\": {\n \"url\": \"https://github.com/samirazazy/DPS-AI-CHALLENGE/issues\"\n },\n \"homepage\": \"https://github.com/samirazazy/DPS-AI-CHALLENGE#readme\",\n \"dependencies\": {\n \"axios\": \"^0.24.0\",\n \"hotp-totp-generator\": \"^1.1.3\"\n }\n}\n" }, { "alpha_fraction": 0.7049295902252197, "alphanum_fraction": 0.7302817106246948, "avg_line_length": 25.79245376586914, "blob_id": "48547110eb7b0ed4f56d50266f005688d2bd5a39", "content_id": "f5960141ae31da5da32652572d606623e440fae8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1425, "license_type": "no_license", "max_line_length": 293, "num_lines": 53, "path": "/README.md", "repo_name": "saikiran030996/DPS-AI-CHALLENGE", "src_encoding": "UTF-8", "text": "# DPS AI CHALLENGE\n\n## Description\nThis challenge for Artificial Intelligence Engineer Consists of 3 tasks.\n- Mission 1: Create a AI Model\n- Mission 2: Publish source code & Deploy\n- 
Mission 3: Sending the URL of the task\n\n## Demo link\nView demo <a href=\"https://dps-ai.herokuapp.com/\"><b>Here 💻</b></a>.\nOr use this API endpoint `dps-ai.herokuapp.com/api/predict` to return your predictions.\n#### Note\nThe endpoint accepts a POST request with a JSON body like this:\n```\n{\n\"year\" : 2020,\n\"month\" : 10\n}\n```\nIt returns the prediction in the following format:\n```\n{\n\"prediction\" : value\n}\n```\n## DataFrame\n\nDownload the <a href=\"https://www.opengov-muenchen.de/dataset/monatszahlen-verkehrsunfaelle/resource/40094bd6-f82d-4979-949b-26c8dc00b9a7\"><b>Monatszahlen Verkehrsunfälle</b></a> Dataset from the München Open Data Portal. Here you see the number of accidents for specific categories per month.\n\n## Packages:\n- pandas\n- matplotlib\n- sklearn\n- pickle\n\n## Visualization:\nHistorical visualization of the number of accidents per category\n### Accidents Category Visualization:\n\n<img src=\"./images/accidents_category1.jpg\"/>\n<img src=\"./images/accidents_category2.jpg\"/>\n<img src=\"./images/accidents_category3.jpg\"/>\n<img src=\"./images/accidents_category4.jpg\"/>\n\n<br />\n\n### Number of accidents per category\n<img src=\"./images/category.jpg\"/>\n\n<br />\n\n### Number of accidents per Accident Type\n<img src=\"./images/type.jpg\"/>\n" } ]
3
qiulin/fabulous
https://github.com/qiulin/fabulous
21346058ef3ae72faab359c25496e2627a8a990c
fd4c1a16f4cebe40f83e46217fdcc6438f6cb087
469c5d8ca3d68fd7c683c3cd9061b8eb94887d73
refs/heads/master
2016-09-07T00:12:44.216686
2015-04-29T09:31:04
2015-04-29T09:31:04
34,267,170
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6818181872367859, "alphanum_fraction": 0.6818181872367859, "avg_line_length": 11.736842155456543, "blob_id": "48d1fc5e0d80f7b7d9963f03585b9ca413a12fb0", "content_id": "9645b48cdbc85302c00131ff0710df8f57f72ee2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 242, "license_type": "permissive", "max_line_length": 39, "num_lines": 19, "path": "/fabfile/__init__.py", "repo_name": "qiulin/fabulous", "src_encoding": "UTF-8", "text": "import java\nimport elasticsearch as es\nimport system\nimport git\n\nfrom fabric.api import local, task, env\n\n\nenv.user = \"root\"\nenv.DOWNLOAD_DIR = \"/install\"\n\n@task\ndef update():\n local(\"git pull\")\n\n\n@task\ndef list():\n local(\"fab --list\")\n" }, { "alpha_fraction": 0.7142857313156128, "alphanum_fraction": 0.7142857313156128, "avg_line_length": 14, "blob_id": "de823d8593670fca121a71bcfc5ea5200ae4ee46", "content_id": "e57409480bce3bb7d478bef8e092d1dddb08e292", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14, "license_type": "permissive", "max_line_length": 14, "num_lines": 1, "path": "/fabulous/zabbix.py", "repo_name": "qiulin/fabulous", "src_encoding": "UTF-8", "text": "# TODO: zabbix" }, { "alpha_fraction": 0.75, "alphanum_fraction": 0.75, "avg_line_length": 16, "blob_id": "82c4c2815d479af119a8451a60c370d4ea5cd778", "content_id": "0f88b13e45c6a205d434b732be8b728ec3ab8316", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16, "license_type": "permissive", "max_line_length": 16, "num_lines": 1, "path": "/fabulous/logstash.py", "repo_name": "qiulin/fabulous", "src_encoding": "UTF-8", "text": "# TODO: logstash" }, { "alpha_fraction": 0.5882353186607361, "alphanum_fraction": 0.5978928804397583, "avg_line_length": 22.244897842407227, "blob_id": "50254584cf6c9379a1a4314bfc84a2e62de8f8ed", "content_id": "687c5c685e33a9bd512c9ffe294ffbc8a1b72edb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1207, "license_type": "permissive", "max_line_length": 84, "num_lines": 49, "path": "/fabulous/java.py", "repo_name": "qiulin/fabulous", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# encoding: utf-8\n\n\"\"\"\nOracle Java JDK\n===========\n\n安装Oracle Java JDK\n\"\"\"\n\nfrom fabric.api import run, cd\n\n\nDEFAULT_VERSION = '7u80'\n\n\ndef install_from_mirror(version=DEFAULT_VERSION):\n \"\"\"\n 从内网镜像中下载安装JDK,默认下载到/install文件夹,安装到/usr/local/目录中\n \"\"\"\n\n from fabtools.require.files import directory as require_directory\n\n jdk_filename = version + '.tar.gz'\n\n jdk_url = 'http://code.zjol.com.cn/mirrors/server-jre/archive/%s' % jdk_filename\n\n require_directory('/install')\n with cd('/install'):\n run('rm -rf %s' % jdk_filename)\n run('wget %s -O %s' % (jdk_url, jdk_filename))\n require_directory('/usr/local/jdk')\n run('tar zxvf %s -C /usr/local/jdk' % jdk_filename)\n run('chmod 755 -R /usr/local/jdk/bin')\n\n _create_profile_d_file()\n\n\ndef _create_profile_d_file():\n '''\n 添加环境变量\n :return:\n '''\n from fabtools.require.files import file as requrie_file\n\n requrie_file('/etc/profile.d/java.sh', contents=\n 'export JAVA_HOME=\"/usr/local/jdk\"\\n' +\n 'export PATH=\"$JAVA_HOME/bin:$PATH\"\\n',\n mode='0755')\n" }, { "alpha_fraction": 0.692307710647583, "alphanum_fraction": 0.692307710647583, "avg_line_length": 13, "blob_id": 
"12a8ed25a6470c9add04e62cf0957ca73a77f068", "content_id": "d68aeea897ddf50bf69f48161b48903461694595", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13, "license_type": "permissive", "max_line_length": 13, "num_lines": 1, "path": "/fabulous/redis.py", "repo_name": "qiulin/fabulous", "src_encoding": "UTF-8", "text": "# TODO: redis" }, { "alpha_fraction": 0.6724137663841248, "alphanum_fraction": 0.6751360893249512, "avg_line_length": 18.678571701049805, "blob_id": "a0e3ef6d7cb73f4190aa1e49bab5c8f2ec7004a3", "content_id": "8763eae4a63ded212d46b585792cabb5acffc3f2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1102, "license_type": "permissive", "max_line_length": 97, "num_lines": 56, "path": "/fabulous/elasticsearch.py", "repo_name": "qiulin/fabulous", "src_encoding": "UTF-8", "text": "__author__ = 'qiulin'\n\n\"\"\"\nElasticSearch Install and Managment\n\"\"\"\n\nfrom fabric.api import run, cd\nfrom fabtools.require import directory as require_directory\n\n\nDEFAULT_VERSION = \"1.5.1\"\n\n\ndef install_from_rpm(version=DEFAULT_VERSION):\n elastic_filename = \"elasticsearch-%s.noarch.rpm\" % version\n elastic_url = \"https://download.elastic.co/elasticsearch/elasticsearch/%s\" % elastic_filename\n\n require_directory(\"/install\")\n with cd(\"/install\"):\n run(\"rm -rf %s\" % elastic_filename)\n run(\"wget %s -O %s\" % (elastic_url, elastic_filename))\n run(\"rpm -ivh %s\" % elastic_filename)\n\n run(\"/sbin/chkconfig --add elasticsearch\")\n\n\ndef install_from_source(version=DEFAULT_VERSION):\n pass\n\n\ndef enable():\n run(\"/sbin/chkconfig elasticsearch on\")\n\n\ndef disable():\n run(\"/sbin/chkconfig elasticsearch off\")\n\n\ndef start():\n run(\"service elasticsearch start\")\n\n\ndef stop():\n run(\"service elasticsearch stop\")\n\n\ndef restart():\n run(\"searvice elasticsearch restart\")\n\n\ndef status():\n run(\"service elasticsearch status\")\n\n\ndef config(*argv, **kwargs):\n pass\n" }, { "alpha_fraction": 0.6013006567955017, "alphanum_fraction": 0.6078038811683655, "avg_line_length": 21.97701072692871, "blob_id": "b9e6ffe889b8d7cb4816cb682559b2d931210709", "content_id": "3a8244f7c9b7512d229f73137eaf9f24c369bf4b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1999, "license_type": "permissive", "max_line_length": 103, "num_lines": 87, "path": "/fabulous/system.py", "repo_name": "qiulin/fabulous", "src_encoding": "UTF-8", "text": "__author__ = 'qiulin'\n\nfrom fabric.api import run, hide, settings\nfrom fabric.contrib.files import sed\nfrom fabric.contrib.files import upload_template as upload\n\nfrom fabtools.require.files import file as reqire_file\nfrom fabtools.utils import run_as_root\n\nfrom fabulous import config as c\n\n\nNTP_SERVER = 'cn.pool.ntp.org'\n\ndef get_ip():\n \"\"\"\n Get the host ip.\n \"\"\"\n with settings(hide('running', 'stdout')):\n return run(r\"ifconfig | sed -En 's/127.0.0.1//;s/.*inet (addr:)?(([0-9]*\\.){3}[0-9]*).*/\\2/p'\")\n\n\ndef init_centos():\n \"\"\"\n Initial system environment.\n \"\"\"\n _init_yum()\n _init_pip()\n _init_config()\n _init_service()\n disable_selinux()\n update_time()\n\n\ndef _init_yum():\n \"\"\"\n Initial centos yum install packages\n \"\"\"\n from fabtools.require.rpm import packages as require_packages\n\n require_packages([\n 'gcc',\n 'vim',\n 'unzip',\n 'make',\n 'wget',\n 'iotop',\n 'htop',\n 'ntpdate'\n ])\n\n\ndef 
_init_service():\n from fabtools.require.service import started as require_service\n with settings(warn_only=True):\n require_service('ntpdate')\n require_service('sshd')\n require_service('iptables')\n\n\ndef update_time():\n run('ntpdate %s' % NTP_SERVER)\n\n\ndef disable_selinux():\n sed('/etc/selinux/config', 'SELINUX=enforcing', 'SELINUX=permissive')\n sed('/etc/sysconfig/selinux', 'SELINUX=enforcing', 'SELINUX=disabled')\n with settings(warn_only=True):\n run_as_root('/usr/sbin/setenforce 0')\n\n\ndef _init_config():\n # vim config\n upload('dotvimrc', '/root/.vimrc', template_dir=c.TEMPLATES_DIR,\n backup=False)\n reqire_file('/etc/profile.d/vim.sh', contents=\n 'alias vi=vim\\n')\n\n # ntpdate config\n upload('ntp.conf', '/etc/ntp.conf', template_dir=c.TEMPLATES_DIR,\n backup=False)\n\n\ndef _init_pip():\n from fabtools import python as py\n if not py.is_pip_installed():\n py.install_pip()\n" }, { "alpha_fraction": 0.6233845949172974, "alphanum_fraction": 0.6326153874397278, "avg_line_length": 24.390625, "blob_id": "68ad6bad27357ccd07df66a133f5090ca3cf33f2", "content_id": "dbffb99f77ddb302c328f3451312636c69492289", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1625, "license_type": "permissive", "max_line_length": 68, "num_lines": 64, "path": "/fabulous/git.py", "repo_name": "qiulin/fabulous", "src_encoding": "UTF-8", "text": "__author__ = 'qiulin'\n\nimport os\n\nfrom fabric.api import run, cd\n\nfrom fabtools.rpm import is_installed, uninstall, install\nfrom fabtools.require.rpm import packages as require_packages\nfrom fabtools.require.files import directory as require_directory\nfrom fabtools.files import remove\n\nfrom fabulous import config as c\n\n\nDEFAULT_VERSION = \"2.3.6\" # latest 2015-04-22\n\n\ndef install_from_source(version=DEFAULT_VERSION):\n\n if is_installed('git'):\n uninstall('git')\n\n require_packages([\n 'curl-devel',\n 'expat-devel',\n 'gettext-devel',\n 'openssl-devel',\n 'zlib-devel',\n 'perl-ExtUtils-MakeMaker'])\n\n git_filename = \"v%s.tar.gz\" % version\n git_url = \"https://github.com/git/git/archive/%s\" % git_filename\n\n require_directory(c.DOWNLOAD_DIR)\n with cd(c.DOWNLOAD_DIR):\n run('wget %s -O %s' % (git_url, git_filename))\n run('tar zxvf %s' % git_filename)\n\n with cd(os.path.join(c.DOWNLOAD_DIR, 'git-%s' % version)):\n run('make prefix=%s/git all' % c.INSTALL_DIR)\n run('make prefix=%s/git install' % c.INSTALL_DIR)\n\n\ndef uninstall_from_source():\n remove(os.path.join(c.INSTALL_DIR, 'git'), recursive=True)\n remove('/etc/profile.d/git.sh')\n\n\n\ndef _create_profile_d_file():\n from fabtools.require.files import file as require_file\n\n require_file(\"/etc/profile.d/git.sh\", contents=\n 'export GIT_HOME=\"%s/git\"\\n' % c.INSTALL_DIR +\n 'export PATH=$PATH:$GIT_HOME/bin/\\n',\n mode=0755)\n\n\ndef install_from_repo():\n install('git')\n\n\ndef uninstall_from_repo():\n uninstall('git')\n" }, { "alpha_fraction": 0.7142857313156128, "alphanum_fraction": 0.7142857313156128, "avg_line_length": 14, "blob_id": "01b4709ad35268a73dcf8449e09b51bbb1435386", "content_id": "4de6858cbaa28d8fbd97e4e4af4c95bdac102848", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14, "license_type": "permissive", "max_line_length": 14, "num_lines": 1, "path": "/fabulous/nagios.py", "repo_name": "qiulin/fabulous", "src_encoding": "UTF-8", "text": "# TODO: nagios" }, { "alpha_fraction": 0.75, "alphanum_fraction": 0.75, "avg_line_length": 
16, "blob_id": "369fc795e0460cb6ce45502fcaaeaab1a5d9b894", "content_id": "c37df14cd9cfc6b41c7919d4c7bc56c9795d81b3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16, "license_type": "permissive", "max_line_length": 16, "num_lines": 1, "path": "/fabulous/iptables.py", "repo_name": "qiulin/fabulous", "src_encoding": "UTF-8", "text": "# TODO: iptables" }, { "alpha_fraction": 0.5764163136482239, "alphanum_fraction": 0.5777338743209839, "avg_line_length": 20.671428680419922, "blob_id": "711bffe58c35a5fc6ffb13a5ae51e121211ca00c", "content_id": "a9d38d342257d7b8c1c53042b4cca6ed98dab0be", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1518, "license_type": "permissive", "max_line_length": 76, "num_lines": 70, "path": "/bootstrap.sh", "repo_name": "qiulin/fabulous", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\n\nDOWNLOAD_DIR=/install\nPIP_URL=\"https://bootstrap.pypa.io/get-pip.py\"\nPWD=`pwd`\nFABULOUS_GIT=\"https://github.com/qiulin/fabulous\"\n\n# function: __os_info\n# Discover operation system information\n__os_info()\n{\n if [[ -f \"/etc/redhat-release\" ]]; then\n FAB_OS=$( awk '{print $1}' /etc/redhat-release )\n FAB_VER=$( awk '{print $3}' /etc/redhat-release )\n else\n FAB_OS=$(lsb_release -si)\n FAB_VER=$(lsb_release -sr)\n fi\n}\n\n# TODO\n__require_package()\n{\n echo \"\"\n}\n__os_info\n\necho $FAB_OS\necho $FAB_VER\necho $PWD\n\nif [[ ! -f $DOWNLOAD_DIR ]]; then\n mkdir -p $DOWNLOAD_DIR\n echo \"$DOWNLOAD_DIR did not exist.\"\n echo \"create $DOWNLOAD_DIR.\"\nfi\n\ncase \"$FAB_OS\" in\n CentOS)\n yum install -y epel-release wget python-devel git gcc make gmp-devel\n if [[ ! -n `which pip` ]]; then\n cd $DOWNLOAD_DIR # download get-pip.py\n wget $PIP_URL .\n python get-pip.py # install pip\n fi\n ;;\n ubuntu)\n apt-get install -y python-pip git build-essential python-dev\n ;;\n *)\n if [[ -n `which pip` ]]; then\n echo \"pip has already been installed.\"\n else\n echo \"pip has not been installed.\"\n echo \"Install pip\"\n fi\n ;;\n\nesac\n\npip install fabric fabtools # install fabric and fabtools\n\n\nif [[ -f $DOWNLOAD_DIR/fabulous ]]; then\n cd $DOWNLOAD_DIR/fabulous/\n git pull\nelse\n cd $DOWNLOAD_DIR/fabulous/\n git clone $FABULOUS_GIT\nfi\n\n" }, { "alpha_fraction": 0.6699029207229614, "alphanum_fraction": 0.6699029207229614, "avg_line_length": 19.799999237060547, "blob_id": "cbe061cd06d505d5ac1fa98fb4eab48b381a7aee", "content_id": "92b7b288646704cb74f522ad4b38fe47807d256a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 103, "license_type": "permissive", "max_line_length": 27, "num_lines": 5, "path": "/fabulous/config.py", "repo_name": "qiulin/fabulous", "src_encoding": "UTF-8", "text": "__author__ = 'qiulin'\n\nTEMPLATES_DIR = 'templates'\nDOWNLOAD_DIR = '/install'\nINSTALL_DIR = '/usr/local'" }, { "alpha_fraction": 0.6875, "alphanum_fraction": 0.6875, "avg_line_length": 16.882352828979492, "blob_id": "723ab1729c86611e13b4b5ac053c60f9de47d2a2", "content_id": "03c296b67ce6359988851c74fccec782aa8b6e1e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 304, "license_type": "permissive", "max_line_length": 67, "num_lines": 17, "path": "/fabfile/git.py", "repo_name": "qiulin/fabulous", "src_encoding": "UTF-8", "text": "__author__ = 'qiulin'\n\nfrom fabric.api import task\n\nfrom fabulous.git import 
install_from_source, uninstall_from_source\n\n\n@task\ndef install(version=None):\n if not version:\n install_from_source()\n else:\n install_from_source(version)\n\n@task\ndef uninstall():\n uninstall_from_source()\n" }, { "alpha_fraction": 0.6050000190734863, "alphanum_fraction": 0.6050000190734863, "avg_line_length": 9.256410598754883, "blob_id": "d15983b0382a4bcb65dc145c2d7912d241419d28", "content_id": "99b3c728a232304eacd1aaef5776eee0e4ef578d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 400, "license_type": "permissive", "max_line_length": 40, "num_lines": 39, "path": "/fabfile/elasticsearch.py", "repo_name": "qiulin/fabulous", "src_encoding": "UTF-8", "text": "from fabulous import elasticsearch as es\n\nfrom fabric.api import task\n\n\n@task\ndef install(version=None):\n if version:\n return\n else:\n es.install_from_rpm()\n\n\n@task\ndef enable():\n es.enable()\n\n@task\ndef disable():\n es.disable()\n\n@task\ndef start():\n es.start()\n\n\n@task\ndef stop():\n es.stop()\n\n\n@task\ndef restart():\n es.restart()\n\n\n@task\ndef status():\n es.status()\n" }, { "alpha_fraction": 0.6988636255264282, "alphanum_fraction": 0.6988636255264282, "avg_line_length": 11.571428298950195, "blob_id": "7e95acfac17e7419ac0f30b0ff8e2c50e19d0fa6", "content_id": "95fa520c0e988a68db284d0e0e08690203411f8a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 176, "license_type": "permissive", "max_line_length": 29, "num_lines": 14, "path": "/fabfile/system.py", "repo_name": "qiulin/fabulous", "src_encoding": "UTF-8", "text": "from fabric.api import task\nfrom fabric.utils import puts\n\nfrom fabulous import system\n\n\n@task\ndef ip():\n puts(system.get_ip())\n\n\n@task\ndef init():\n system.init_centos()\n" }, { "alpha_fraction": 0.800000011920929, "alphanum_fraction": 0.800000011920929, "avg_line_length": 9, "blob_id": "38d0221bdd250917ac7e950a1390ae5aa448e74e", "content_id": "575d6506ea7801999e943c03dbe8ac7b169694cf", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 52, "license_type": "permissive", "max_line_length": 17, "num_lines": 3, "path": "/README.md", "repo_name": "qiulin/fabulous", "src_encoding": "UTF-8", "text": "# fabulous\n\n基于Fabric的自动化运维工具。\n" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 15.615385055541992, "blob_id": "19a8b48907f6389b033c106c274ee77727e2156e", "content_id": "05460440ae7e4fab98d03f4c18284a56c293cf1e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 216, "license_type": "permissive", "max_line_length": 41, "num_lines": 13, "path": "/fabfile/java.py", "repo_name": "qiulin/fabulous", "src_encoding": "UTF-8", "text": "__author__ = 'qiulin'\n\nfrom fabulous import java\n\nfrom fabric.api import task\n\n\n@task\ndef install(version=None):\n if version:\n java.install_from_mirror(version)\n else:\n java.install_from_mirror()\n" } ]
17
uiboy-jj/douban250
https://github.com/uiboy-jj/douban250
829be8f1f4a8dded3f51b2eb952eadb1c051e353
1bdcaa21d548913cd38dd497c3d9aed2e2a8c3b3
0ae475b9ddd89f832a5abfe4a5a9562e9be4d110
refs/heads/master
2023-08-07T09:19:19.527726
2021-09-13T08:11:52
2021-09-13T08:11:52
405,881,159
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6038338541984558, "alphanum_fraction": 0.6059637665748596, "avg_line_length": 31.379310607910156, "blob_id": "f3691bf73fdf725e9e822ae182068dd684c4552e", "content_id": "b441e80f2aabe42ed887be04c78acf9e63b01cfb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1029, "license_type": "no_license", "max_line_length": 80, "num_lines": 29, "path": "/Douban/Douban/pipelines.py", "repo_name": "uiboy-jj/douban250", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html\n\nfrom scrapy.exporters import CsvItemExporter\n\nclass DoubanFilePipeline(object):\n def open_spider(self, spider): # 在爬虫开启的时候仅执行一次\n if spider.name == 'movie':\n\n self.file = open(\"movie.csv\", \"ab+\")\n self.exporter = CsvItemExporter(self.file, encoding='utf-8-sig')\n self.exporter.start_exporting()\n\n\n def close_spider(self, spider): # 在爬虫关闭的时候仅执行一次\n if spider.name == 'movie':\n self.exporter.finish_exporting()\n self.file.close()\n\n def process_item(self, item, spider):\n if spider.name == 'movie':\n self.exporter.export_item(item)\n\n # 不return的情况下,另一个权重较低的pipeline将不会获得item\n return item\n" }, { "alpha_fraction": 0.6235954761505127, "alphanum_fraction": 0.6404494643211365, "avg_line_length": 13.916666984558105, "blob_id": "ad1d5627c54cb11e58cf90b2399eaa09c70ddd1b", "content_id": "899a9825433e26e21d5e02a8893e4e437a7912f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 178, "license_type": "no_license", "max_line_length": 40, "num_lines": 12, "path": "/Douban/doubanweb/douban_1/urls.py", "repo_name": "uiboy-jj/douban250", "src_encoding": "UTF-8", "text": "#!/user/bin/env python3\n#-*-coding:utf-8 -*-\nfrom django.urls import path\nfrom . 
import views\napp_name = 'douban_1'\n\n\n\nurlpatterns = [\n path('',views.index,name = 'index'),\n\n]" }, { "alpha_fraction": 0.524669885635376, "alphanum_fraction": 0.562890887260437, "avg_line_length": 41.32352828979492, "blob_id": "ae6d5f6a0282f7008d014e259344794d3d52182f", "content_id": "30359d493fbc5006b7e3121f6ddaf7da84246f32", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1487, "license_type": "no_license", "max_line_length": 117, "num_lines": 34, "path": "/Douban/doubanweb/douban_1/migrations/0001_initial.py", "repo_name": "uiboy-jj/douban250", "src_encoding": "UTF-8", "text": "# Generated by Django 3.2 on 2021-09-09 06:23\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='movie_data',\n fields=[\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('comments_num', models.CharField(max_length=128, verbose_name='评论人数')),\n ('director', models.CharField(max_length=128, verbose_name='导演')),\n ('name', models.CharField(max_length=128, verbose_name='名称')),\n ('order', models.CharField(max_length=128, verbose_name='排名')),\n ('screenwriter', models.CharField(max_length=128, verbose_name='编剧')),\n ('type_one', models.CharField(max_length=128, verbose_name='类型1')),\n ('type_two', models.CharField(max_length=128, verbose_name='类型2')),\n ('type_three', models.CharField(max_length=128, verbose_name='类型3')),\n ('type_four', models.CharField(max_length=128, verbose_name='类型4')),\n ('type_five', models.CharField(max_length=128, verbose_name='类型5')),\n ('score', models.CharField(max_length=128, verbose_name='评分')),\n ],\n options={\n 'verbose_name_plural': 'Top250',\n },\n ),\n ]\n" }, { "alpha_fraction": 0.6531791687011719, "alphanum_fraction": 0.6546242833137512, "avg_line_length": 26.600000381469727, "blob_id": "6bd456ac4dfe4dc5c42a6a2581f6dbe0b41531cf", "content_id": "befe67b0e51b35cbc1ed81f5b0eb8351a4ff1b55", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 774, "license_type": "no_license", "max_line_length": 51, "num_lines": 25, "path": "/Douban/Douban/items.py", "repo_name": "uiboy-jj/douban250", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# Define here the models for your scraped items\n#\n# See documentation in:\n# http://doc.scrapy.org/en/latest/topics/items.html\n\nimport scrapy\n\n\nclass DoubanItem(scrapy.Item):\n # define the fields for your item here like:\n order = scrapy.Field()#排榜顺序\n name = scrapy.Field()#电影名称\n director = scrapy.Field()#导演\n screenwriter = scrapy.Field()#编剧\n #starring = scrapy.Field()#主演\n type = scrapy.Field()#电影类型\n # place = scrapy.Field()#制片地\n # language = scrapy.Field()#语言\n # time = scrapy.Field()#上映时间\n score = scrapy.Field()#评分\n comments_num = scrapy.Field()#评价人数\n # whether_play = scrapy.Field()#能否播放\n #play_link = scrapy.Field()#播放链接\n\n\n" }, { "alpha_fraction": 0.6149870753288269, "alphanum_fraction": 0.6240310072898865, "avg_line_length": 22.363636016845703, "blob_id": "dd88a502288ecee596382f4dad0bc9be29e8b2e0", "content_id": "2f4c8fc8926fb9e77c29b7603f66fb18caff177d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 850, "license_type": "no_license", "max_line_length": 74, "num_lines": 33, "path": "/Douban/douban_clean.py", "repo_name": "uiboy-jj/douban250", 
"src_encoding": "UTF-8", "text": "#!/user/bin/env python3\n#-*-coding:utf-8 -*-\nfrom shutil import copyfile\nimport csv\nimport os\n# 相对路径\nif __name__ == '__main__':\n\n copyfile(\"movie.csv\", \"movie2.csv\") #留一份原始数据做参照\n\n\nif __name__ == '__main__':\n\n file_old = 'movie.csv'\n file_temp = 'movie_data.csv'\n\n with open(file_old, 'r', newline='', encoding='utf-8-sig') as f_old, \\\n open(file_temp, 'w', newline='', encoding='utf-8-sig') as f_temp:\n f_csv_old = csv.reader(f_old)\n f_csv_temp = csv.writer(f_temp)\n for i, rows in enumerate(f_csv_old): # 保留header\n if i == 0:\n f_csv_temp.writerow(rows)\n break\n for rows in f_csv_old:\n if rows[0] != 'comments_num': # 删除第一列值为comments_num的行\n f_csv_temp.writerow(rows)\n\n os.remove(file_old)\n os.rename(file_temp, file_old)\n\n\n#剩下还有一些分列操作 用excel工具\n\n\n\n" }, { "alpha_fraction": 0.6930434703826904, "alphanum_fraction": 0.7286956310272217, "avg_line_length": 49, "blob_id": "eec6af476599ed10ce5c271dddeadde1f8e307fa", "content_id": "5535bffe0a34614cd2c8dc7011ffb1be9076f5dc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1222, "license_type": "no_license", "max_line_length": 101, "num_lines": 23, "path": "/Douban/doubanweb/douban_1/models.py", "repo_name": "uiboy-jj/douban250", "src_encoding": "UTF-8", "text": "from django.db import models\n\n# Create your models here.\n#comments_num,director,name,order,screenwriter,type_one,type_two,type_three,type_four,type_five,score\nclass movie_data(models.Model):\n\n\n comments_num = models.CharField(max_length=128,verbose_name='评论人数',blank=False)\n director = models.CharField(max_length=128, verbose_name='导演', blank=False)\n name = models.CharField(max_length=128, verbose_name='名称', blank=False)\n order = models.CharField(max_length=128, verbose_name='排名', blank=False)\n screenwriter = models.CharField(max_length=128, verbose_name='编剧', blank=False)\n type_one = models.CharField(max_length=128,verbose_name='类型1',blank=False)\n type_two = models.CharField(max_length=128, verbose_name='类型2', blank=False)\n type_three = models.CharField(max_length=128, verbose_name='类型3', blank=False)\n type_four = models.CharField(max_length=128, verbose_name='类型4', blank=False)\n type_five = models.CharField(max_length=128, verbose_name='类型5', blank=False)\n score = models.CharField(max_length=128, verbose_name='评分', blank=False)\n\n\n\n class Meta:\n verbose_name_plural = 'Top250' #设置这个模型在后台的名字\n" }, { "alpha_fraction": 0.41305214166641235, "alphanum_fraction": 0.4540390074253082, "avg_line_length": 20.8157901763916, "blob_id": "046c837f06b665a08c31e7e8fcdff76c2ba36b2e", "content_id": "b0f2dd1ddadb7fa8542d38007d1033cfd31d087e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2925, "license_type": "no_license", "max_line_length": 50, "num_lines": 114, "path": "/README.md", "repo_name": "uiboy-jj/douban250", "src_encoding": "UTF-8", "text": "# douban250\ndouban电影top250\n目录结构\n│ chromedriver.exe\n│ douban_clean.py\n│ LATEST_RELEASE_92.0.4515\n│ movie.csv\n│ movie2.csv\n│ scrapy.cfg\n│\n├─.idea\n│ │ dataSources.local.xml\n│ │ dataSources.xml\n│ │ Douban.iml\n│ │ encodings.xml\n│ │ misc.xml\n│ │ modules.xml\n│ │ workspace.xml\n│ │\n│ ├─dataSources\n│ │ 6f2fe6b7-b649-424e-b78a-2746682f59f4.xml\n│ │\n│ └─inspectionProfiles\n│ profiles_settings.xml\n│\n├─Douban\n│ │ items.py\n│ │ middlewares.py\n│ │ pipelines.py\n│ │ settings.py\n│ │ __init__.py\n│ │\n│ ├─spiders\n│ │ │ movie.py\n│ │ │ __init__.py\n│ │ 
│\n│ │ └─__pycache__\n│ │ movie.cpython-36.pyc\n│ │ movie.cpython-37.pyc\n│ │ __init__.cpython-36.pyc\n│ │ __init__.cpython-37.pyc\n│ │\n│ └─__pycache__\n│ items.cpython-36.pyc\n│ items.cpython-37.pyc\n│ middlewares.cpython-36.pyc\n│ middlewares.cpython-37.pyc\n│ pipelines.cpython-37.pyc\n│ settings.cpython-36.pyc\n│ settings.cpython-37.pyc\n│ __init__.cpython-36.pyc\n│ __init__.cpython-37.pyc\n│\n└─doubanweb\n │ db.sqlite3\n │ manage.py\n │\n ├─doubanweb\n │ │ asgi.py\n │ │ settings.py\n │ │ urls.py\n │ │ wsgi.py\n │ │ __init__.py\n │ │\n │ └─__pycache__\n │ settings.cpython-37.pyc\n │ urls.cpython-37.pyc\n │ wsgi.cpython-37.pyc\n │ __init__.cpython-37.pyc\n │\n ├─douban_1\n │ │ admin.py\n │ │ apps.py\n │ │ models.py\n │ │ tests.py\n │ │ urls.py\n │ │ views.py\n │ │ __init__.py\n │ │\n │ ├─migrations\n │ │ │ 0001_initial.py\n │ │ │ __init__.py\n │ │ │\n │ │ └─__pycache__\n │ │ 0001_initial.cpython-37.pyc\n │ │ __init__.cpython-37.pyc\n │ │\n │ └─__pycache__\n │ admin.cpython-37.pyc\n │ apps.cpython-37.pyc\n │ models.cpython-37.pyc\n │ urls.cpython-37.pyc\n │ views.cpython-37.pyc\n │ __init__.cpython-37.pyc\n │\n ├─static\n │ ├─css\n │ │ index.css\n │ │ public.css\n │ │\n │ ├─images\n │ │ title_border_bg.png\n │ │ title_left_bg.png\n │ │ title_right_bg.png\n │ │\n │ └─js\n │ echarts-4.2.1.min.js\n │ echarts.all.js\n │ jquery-1.10.2.js\n │ jquery.min.js\n │ jquery.numscroll.js\n │\n └─templates\n index.html\n \n \n" }, { "alpha_fraction": 0.5867146849632263, "alphanum_fraction": 0.6181282997131348, "avg_line_length": 34.13793182373047, "blob_id": "c58de4ee4447a48760f90fce1ad54331057f7762", "content_id": "805f60e20f0190a17db38d063fb9ff0d316f4822", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3098, "license_type": "no_license", "max_line_length": 129, "num_lines": 87, "path": "/Douban/doubanweb/douban_1/views.py", "repo_name": "uiboy-jj/douban250", "src_encoding": "UTF-8", "text": "import json\nfrom django.db.models import Avg\nfrom django.shortcuts import render\nfrom .models import movie_data\nfrom django.db.models import Avg,Max\n\nfrom collections import Counter\n# Create your views here.\n# 主页函数\ndef index(request):\n\n comments_num_avg= movie_data.objects.all().aggregate(Avg('comments_num'))\n comments_num_max = movie_data.objects.all().aggregate(Max('comments_num'))\n score_avg = movie_data.objects.all().aggregate(Avg('score'))\n score_max = movie_data.objects.all().aggregate(Max('score'))\n avg_num = comments_num_avg['comments_num__avg']\n max_num = comments_num_max['comments_num__max']\n bv = ('%.1f' % score_avg['score__avg'])\n score_avg_num = bv\n score_max_num = score_max['score__max']\n\n a = list(movie_data.objects.values_list('type_one', flat=True))\n b = list(movie_data.objects.values_list('type_two', flat=True))\n c = list(movie_data.objects.values_list('type_three', flat=True))\n d = list(movie_data.objects.values_list('type_four', flat=True))\n e = list(movie_data.objects.values_list('type_five', flat=True))\n A = a+b+c+d+e\n B = dict(Counter(A))\n B.pop('无')\n\n max_value = max(B.values())\n for keys, values in B.items():\n if values == max_value:\n max_key = keys\n\n min_value = min(B.values())\n for keys, values in B.items():\n if values == min_value:\n min_key = keys\n\n directors = list(movie_data.objects.values_list('director', flat=True))\n directors_num_dict = dict(Counter(directors))\n director_max_value = max(directors_num_dict.values())\n for keys, values in directors_num_dict.items():\n if values == 
director_max_value:\n director_max_key = keys\n\n order_10 = list(movie_data.objects.filter(order__in=[1,2,3,4,5,6,7,8,9,10]).values_list('name', flat=True))\n order_102 = list(movie_data.objects.filter(order__in=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).values_list('comments_num', flat=True))\n name_10 = []\n for a in order_10:\n A = a.split()[0]\n\n name_10.append(A)\n numbers = [int(x) for x in order_102]\n print(name_10)\n print(numbers)\n # content = {}\n # content['B'] =json.dumps(numbers)\n # content['B1'] =json.dumps(name_10)\n # numbers = json.dumps(list(numbers)) # 封装数据 编码\n # numbers = json.loads(numbers) # 解码\n # name_10 = json.dumps(list(name_10)) # 封装数据 编码\n # name_10 = json.loads(name_10) # 解码\n\n # B = numbers\n # B1 = name_10\n # order_10 = list(movie_data.objects.values_list('order', flat=True))\n content_250 =movie_data.objects.values_list('name','order','score','comments_num')\n # name_250 = list(movie_data.objects.values_list('name', flat=True))\n # name_250_2 =[]\n # for a in name_250:\n # A = a.split()[0]\n #\n # name_250_2.append(A)\n #filter(order__in=[i for i in range(1,251)])\n content_250_2 = []\n for i in list(content_250):\n a = list(i)\n content_250_2.append(a)\n for i in content_250_2:\n i[0] = i[0].split()[0]\n\n\n\n\n return render(request, \"index.html\",locals())" }, { "alpha_fraction": 0.4718770384788513, "alphanum_fraction": 0.6257357597351074, "avg_line_length": 70.94117736816406, "blob_id": "50418a0524239783e31084952351f28629dd86be", "content_id": "22ee788faf231efe1277d338a4718a5f0f63021e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6250, "license_type": "no_license", "max_line_length": 989, "num_lines": 85, "path": "/Douban/Douban/spiders/movie.py", "repo_name": "uiboy-jj/douban250", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport scrapy\nfrom Douban.items import DoubanItem\nfrom lxml import etree\nclass MovieSpider(scrapy.Spider):\n name = 'movie'\n allowed_domains = ['douban.com']\n start_urls = [\n #'https://movie.douban.com/top250'\n 'https://movie.douban.com/top250?start=25&filter='\n #'https://movie.douban.com/top250?start=50&filter='\n #'https://movie.douban.com/top250?start=75&filter='\n #'https://movie.douban.com/top250?start=100&filter='\n #'https://movie.douban.com/top250?start=125&filter='\n #'https://movie.douban.com/top250?start=150&filter='\n #'https://movie.douban.com/top250?start=175&filter='\n #'https://movie.douban.com/top250?start=200&filter='\n #'https://movie.douban.com/top250?start=225&filter='\n ]\n\n def start_requests(self): # 重构start_requests方法\n # 这个cookies_str是抓包获取的\n #cookies_str = 'bid=dn-cU54k5kc; ap_v=0,6.0; push_doumail_num=0; push_noty_num=0; __utma=30149280.782481749.1630297594.1630297594.1630297594.1; __utmc=30149280; __utmz=30149280.1630297594.1.1.utmcsr=accounts.douban.com|utmccn=(referral)|utmcmd=referral|utmcct=/; __utmv=30149280.22573; __utmc=223695111; dbcl2=\"225736133:Dy32zxXNP4c\"; ck=jLkr; __utmt=1; __utmb=30149280.8.10.1630297594; ll=\"118281\"; __utma=223695111.1569998650.1630298013.1630298013.1630300806.2; __utmb=223695111.0.10.1630300806; __utmz=223695111.1630300806.2.2.utmcsr=douban.com|utmccn=(referral)|utmcmd=referral|utmcct=/; _pk_ses.100001.4cf6=*; _pk_ref.100001.4cf6=%5B%22%22%2C%22%22%2C1630300806%2C%22https%3A%2F%2Fwww.douban.com%2F%22%5D; _vwo_uuid_v2=DAE529127AFAF5EA345E61DC10B55126D|1d8825cb905cafb4161037dff964eb2f; 
__gads=ID=a60a38c0f590ea34-221af3473ccb00f5:T=1630297593:RT=1630300910:S=ALNI_MY7Yn8i9Nbijaj8cm8F11qx8b9ang; _pk_id.100001.4cf6=323e8f4020f4ba8e.1630298013.2.1630300919.1630298976.' # 抓包获取\n # 将cookies_str转换为cookies_dict\n cookies_str = 'bid=dn-cU54k5kc; push_noty_num=0; push_doumail_num=0; __utmc=30149280; __utmz=30149280.1630297594.1.1.utmcsr=accounts.douban.com|utmccn=(referral)|utmcmd=referral|utmcct=/; __utmv=30149280.22573; __utmc=223695111; dbcl2=\"225736133:Dy32zxXNP4c\"; ck=jLkr; ll=\"118281\"; __utmz=223695111.1630300806.2.2.utmcsr=douban.com|utmccn=(referral)|utmcmd=referral|utmcct=/; _vwo_uuid_v2=DAE529127AFAF5EA345E61DC10B55126D|1d8825cb905cafb4161037dff964eb2f; __gads=ID=a60a38c0f590ea34-221af3473ccb00f5:T=1630297593:RT=1630300910:S=ALNI_MY7Yn8i9Nbijaj8cm8F11qx8b9ang; _pk_ref.100001.4cf6=%5B%22%22%2C%22%22%2C1630418084%2C%22https%3A%2F%2Fwww.douban.com%2F%22%5D; _pk_ses.100001.4cf6=*; __utma=30149280.782481749.1630297594.1630297594.1630418084.2; __utmb=30149280.0.10.1630418084; __utma=223695111.1569998650.1630298013.1630300806.1630418084.3; __utmb=223695111.0.10.1630418084; ap_v=0,6.0; _pk_id.100001.4cf6=323e8f4020f4ba8e.1630298013.3.1630418435.1630302962.'\n cookies_dict = {i.split('=')[0]: i.split('=')[1] for i in cookies_str.split('; ')}\n yield scrapy.Request(\n self.start_urls[0],\n callback=self.parse,\n cookies=cookies_dict\n )\n\n\n def parse(self, response):\n item = DoubanItem()\n #提前详情页面url\n movie_list = response.xpath('//*[@id=\"content\"]/div/div[1]/ol/li/div/div[1]/a/@href').extract()\n #palce = response.xpath('//*[@id=\"content\"]/div/div[1]/ol/li/div/div[2]/div[2]/p[1]/text()[2]')\n #item['place'] = response.xpath('//*[@id=\"content\"]/div/div[1]/ol/li/div/div[2]/div[2]/p[1]/text()[2]').extract()\n # for movie,pla in zip(movie_list,palce):\n # url = movie\n # places =str(pla).replace(' ','').split('/')[1]\n # yield scrapy.Request(url = url, callback=self.detail_parse,meta={\"palce\":places})\n for movie in movie_list:\n url = movie\n yield scrapy.Request(url = url, callback=self.detail_parse)\n\n # '''//*[@id=\"content\"]/div/div[1]/div[2]/span[3]/a'''\n # next_url = response.xpath('//*[@id=\"content\"]/div/div[1]/div[2]/span[3]/a/@href').extract_first()\n # if next_url != None:\n # next_url = response.urljoin(next_url)\n # yield scrapy.Request(url=next_url,callback = self.parse)\n\n\n def detail_parse(self,response):\n ''' order = scrapy.Field()#排榜顺序\n name = scrapy.Field()#电影名称\n director = scrapy.Field()#导演\n screenwriter = scrapy.Field()#编剧\n starring = scrapy.Field()#主演\n type = scrapy.Field()#电影类型\n place = scrapy.Field()#制片地\n language = scrapy.Field()#语言\n time = scrapy.Field()#上映时间\n score = scrapy.Field()#评分\n comments_num = scrapy.Field()#评价人数\n whether_play = scrapy.Field()#能否播放\n play_link = scrapy.Field()#播放链接\n\n '''\n item = DoubanItem()\n # item['name'] = response.xpath('//*[@id=\"content\"]/h1/a/text()').extract_first()\n item['order'] = response.xpath('//*[@id=\"content\"]/div[1]/span[1]/text()').extract_first()\n item['director'] = response.xpath('//*[@id=\"info\"]/span[1]/span[2]/a/text()').extract()\n item['screenwriter'] = response.xpath('//*[@id=\"info\"]/span[2]/span[2]/a/text()').extract()\n #item['starring'] = response.xpath('//*[@id=\"info\"]/span[3]/span[2]/span/a/text()').extract()\n item['type'] = response.xpath('//*[@id=\"info\"]/span[@property=\"v:genre\"]/text()').extract()\n # item['place'] = response.meta['palce']\n # item['time'] = response.xpath('//*[@id=\"content\"]/h1/span/text()').extract_first()\n item['name'] = 
response.xpath('//*[@id=\"content\"]/h1/span/text()').extract_first()\n item['score'] = response.xpath('//*[@id=\"interest_sectl\"]/div[1]/div[2]/strong/text()').extract()\n item['comments_num'] = response.xpath('//*[@id=\"interest_sectl\"]/div[1]/div[2]/div/div[2]/a/span/text()').extract_first()\n #item['play_link'] = response.xpath('//*[@id=\"content\"]/h1/a/@href').extract_first()\n\n yield item\n\n" } ]
9
mtx-testfan/0116
https://github.com/mtx-testfan/0116
68a54d7a3d4b94bea6869556c11b3a21eee53a9b
127c5ebce83ad7bfd5631dfe1a3a52d90e2ae428
7e8f6219b69fed9b1cc684e0631062c53d23163b
refs/heads/master
2023-02-12T18:29:43.982653
2021-01-16T03:39:25
2021-01-16T03:39:25
330,076,653
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6558773517608643, "alphanum_fraction": 0.6695059537887573, "avg_line_length": 32.86538314819336, "blob_id": "f62dbe59efba5bf29b02475b7c86af981a5a5b0d", "content_id": "91a4d8c9b77d7afa3f1a1dd9fecbcd6daa57ae64", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2033, "license_type": "no_license", "max_line_length": 86, "num_lines": 52, "path": "/pageObject/__init__.py", "repo_name": "mtx-testfan/0116", "src_encoding": "UTF-8", "text": "from selenium.webdriver.common.by import By\n'''\n整个项目的配置项\n'''\nurl = 'http://121.42.15.146:9090/mtx/'\n'''以下是登录页面的配置信息'''\nlogin_name = By.NAME,'accounts'\nlogin_pwd = By.NAME,'pwd'\nlogin_button = By.XPATH,\"//form/div[3]/button\"\n'''以下是首页的配置信息'''\nindex_login_link = By.LINK_TEXT,'登录'\nindex_person_center=By.XPATH,'//*[text()=\"个人中心\"]'\nindex_click_right_cart = By.XPATH,'(//*[text()=\"购物车\"])[2]'\nindex_click_index = By.CSS_SELECTOR,'div#doc-topbar-collapse>ul>li:nth-child(1)>a'\n\n\n\n'''以下是购物车页面的配置信息'''\ncart_delete_button = By.XPATH,'(//*[text()=\"删除\"])[1]'\ncart_confirm_delete = By.XPATH,'//*[text()=\"确定\"]'\n\n'''以下是个人中心的配置信息'''\npersoncenter_myaddress_link = By.LINK_TEXT,'我的地址'\n\n\n'''以下是新增地址的配置信息'''\nmyaddress_add_address = By.XPATH,'//*[text()=\" 新增新地址\"]'\nmyaddress_username = By.NAME,'name'\nmyaddress_tel = By.NAME,'tel'\nmyaddress_prov = By.NAME,'province'\nmyaddress_city = By.NAME,'city'\nmyaddress_country = By.NAME,'county'\nmyaddress_js_prov = \"document.querySelectorAll('select')[0].style.display='inline'\"\nmyaddress_js_city = \"document.querySelectorAll('select')[1].style.display='inline'\"\nmyaddress_js_country = \"document.querySelectorAll('select')[2].style.display='inline'\"\nmyaddress_nickname = By.NAME,'alias'\nmyaddress_detailaddress = By.NAME,'address'\nmyaddress_save = By.XPATH,'//*[text()=\"保存\"]'\n\n'''以下是商品详情页的配置'''\ngoods_detail_add_cart =By.XPATH,'//*[text()=\"加入购物车\"]'\nloc_zk_skirt = By.CSS_SELECTOR, '#floor2 div.goods-list>div:nth-child(1)'\ntitle = 'ZK爆款连衣裙'\nloc_pink = By.XPATH,'//*[@data-value=\"粉色\"]'\nloc_M = By.XPATH,'//*[@data-value=\"M\"]'\nloc_now_buy = By.XPATH,'//*[text()=\"立即购买\"]'\ngoogs_detail_click_right_cart = By.XPATH,'//*[text()=\"购物车\"]'\n\n\n'''购买页面的配置'''\nloc_payment = By.XPATH, '//*[text()=\"货到付款\"]'\nloc_submit_order = By.XPATH,'//*[text()=\"提交订单\"]'\n" }, { "alpha_fraction": 0.7372013926506042, "alphanum_fraction": 0.7372013926506042, "avg_line_length": 21.076923370361328, "blob_id": "6eeca96077d2e4b2ac1027576872597ce768447e", "content_id": "1f5cab619496ed30abfed4092f3fcc82b81b6e95", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 317, "license_type": "no_license", "max_line_length": 63, "num_lines": 13, "path": "/pageObject/page_personcenter.py", "repo_name": "mtx-testfan/0116", "src_encoding": "UTF-8", "text": "import allure\nfrom selenium.webdriver.common.by import By\n\nfrom base.baseApi import Base\nimport time\nimport pageObject\n\nclass PagePersonCenter(Base):\n\n @allure.step('点击我的地址')\n # 点击我的地址\n def click_my_address(self):\n self.base_click(pageObject.personcenter_myaddress_link)\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.7135549783706665, "alphanum_fraction": 0.7135549783706665, "avg_line_length": 20.61111068725586, "blob_id": "d71bbfa6cdda5ce99feb7304895f47fc578c83ef", "content_id": "6be0cc72757a2092febf674a6f75ff3deaea1ffd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 419, "license_type": 
"no_license", "max_line_length": 55, "num_lines": 18, "path": "/pageObject/page_cart.py", "repo_name": "mtx-testfan/0116", "src_encoding": "UTF-8", "text": "import allure\nfrom selenium.webdriver.common.by import By\n\nfrom base.baseApi import Base\nimport time\nimport pageObject\n\nclass PageCart(Base):\n\n\n @allure.step('点击删除按钮')\n def click_delete_button(self):\n self.base_click(pageObject.cart_delete_button)\n\n #确定删除\n @allure.step('确定删除')\n def click_confirm_delete(self):\n self.base_click(pageObject.cart_confirm_delete)\n\n\n" }, { "alpha_fraction": 0.6845930218696594, "alphanum_fraction": 0.6845930218696594, "avg_line_length": 35.05263137817383, "blob_id": "606137e9541eb6ab6ba9f73620d2a7701678a54d", "content_id": "1081739e2fedea38178eacc1da20cb402f1b1120", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 700, "license_type": "no_license", "max_line_length": 53, "num_lines": 19, "path": "/pageAction/addaddress_action.py", "repo_name": "mtx-testfan/0116", "src_encoding": "UTF-8", "text": "from pageAction.actions_manager import ActionsManager\n\n\nclass AddAddress(ActionsManager):\n # 组合业务\n def addaddress_business(self):\n self.pageindex.click_person_center()\n self.pagepersoncenter.click_my_address()\n self.pagemyaddress.click_new_address()\n # 切换iframe\n self.pagemyaddress.switch_to_iframe()\n self.pagemyaddress.input_name()\n self.pagemyaddress.input_tel()\n self.pagemyaddress.select_prov()\n self.pagemyaddress.select_city()\n self.pagemyaddress.select_country()\n self.pagemyaddress.input_detail_address()\n self.pagemyaddress.address_nickname()\n self.pagemyaddress.address_save()\n\n\n\n" }, { "alpha_fraction": 0.672301709651947, "alphanum_fraction": 0.672301709651947, "avg_line_length": 24.566667556762695, "blob_id": "167a0ac356b7fa2144b3a5f534e5ce1fa7e4e6f0", "content_id": "e16cf0b5c6cbfd1ab844d6fc580d764f08a743f1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 939, "license_type": "no_license", "max_line_length": 65, "num_lines": 30, "path": "/pageObject/page_index.py", "repo_name": "mtx-testfan/0116", "src_encoding": "UTF-8", "text": "import allure\n\nfrom base.baseApi import Base\nimport time\nimport pageObject\nclass PageIndex(Base):\n\n # 在首页点击登录链接\n @allure.step('点击登录链接')\n def click_login_link(self):\n self.base_click(pageObject.index_login_link)\n\n # 在首页点击zk裙子\n @allure.step('首页-点击zk裙子')\n def click_zk_skirt(self):\n self.base_click(pageObject.loc_zk_skirt)\n\n # 切换窗口(过渡的动作-前移-写在触发这个动作的页面上)\n @allure.step('切换窗口-从首页到商品详情页')\n def switch_window(self):\n self.base_switch_window(pageObject.title)\n\n # 点击个人中心\n @allure.step('点击个人中心')\n def click_person_center(self):\n self.base_click(pageObject.index_person_center)\n\n @allure.step('点击右上角的购物车')\n def click_right_cart(self):\n self.base_click(pageObject.googs_detail_click_right_cart)\n\n\n" }, { "alpha_fraction": 0.663701057434082, "alphanum_fraction": 0.663701057434082, "avg_line_length": 21.79166603088379, "blob_id": "8dcfd2f42258561463d8ec0145843694ba53efdf", "content_id": "9666414bde4fe9aa17d78fc42a420f1807aa539f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 640, "license_type": "no_license", "max_line_length": 55, "num_lines": 24, "path": "/pageObject/page_login.py", "repo_name": "mtx-testfan/0116", "src_encoding": "UTF-8", "text": "import allure\nfrom selenium.webdriver.common.by import By\n\nfrom base.baseApi import Base\nimport time\nimport 
pageObject\n\nclass PageLogin(Base):\n\n # 输入用户名\n @allure.step('输入用户名')\n def input_username(self,username):\n # 找到用户名的按钮,点击输入\n\n self.base_input(pageObject.login_name,username)\n # 输入密码\n @allure.step('输入密码')\n def input_pwd(self,password):\n\n self.base_input(pageObject.login_pwd,password)\n # 点击登录\n @allure.step('点击登录')\n def click_login_button(self):\n self.base_click(pageObject.login_button)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.5942460298538208, "alphanum_fraction": 0.5952380895614624, "avg_line_length": 17.629629135131836, "blob_id": "ec4a49eb902415f03691f99abfd1224a16812f8c", "content_id": "0c0121997a8b75d1b2cfc41531429cd661975710", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1144, "license_type": "no_license", "max_line_length": 65, "num_lines": 54, "path": "/pageObject/page_goods_detail.py", "repo_name": "mtx-testfan/0116", "src_encoding": "UTF-8", "text": "import allure\nfrom selenium.webdriver.common.by import By\n\nfrom base.baseApi import Base\nimport time\nimport pageObject\n\nclass PageGoodsDetail(Base):\n\n # 点击粉色\n @allure.step('点击粉色')\n def click_pink(self):\n self.base_click(pageObject.loc_pink)\n\n # 点击M\n @allure.step('点击M')\n def click_M(self):\n self.base_click(pageObject.loc_M)\n\n # 点击立即购买\n @allure.step('点击立即购买')\n def click_now_buy(self):\n self.base_click(pageObject.loc_now_buy)\n\n # 加入购物车\n @allure.step('加入购物车')\n def click_add_cart(self):\n self.base_click(pageObject.goods_detail_add_cart)\n\n @allure.step('点击右上角的购物车')\n def click_right_cart(self):\n self.base_click(pageObject.googs_detail_click_right_cart)\n\n\n\n\n\n\n\n\n\n\n\n # # 组合页面\n # def login_business(self):\n # # 点击登录链接\n # self.click_login_link()\n # # 输入用户名\n # self.input_username()\n # # 输入密码\n # self.input_pwd()\n # # 点击登录按钮\n # self.click_login()\n # time.sleep(2)\n\n\n" }, { "alpha_fraction": 0.6038251519203186, "alphanum_fraction": 0.6074681282043457, "avg_line_length": 25.071428298950195, "blob_id": "f631607a2aa08e374e10028a5e589960c576c427", "content_id": "48028b269a7a262e5c2797f6d5eb8774c95046b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1578, "license_type": "no_license", "max_line_length": 60, "num_lines": 42, "path": "/base/driver.py", "repo_name": "mtx-testfan/0116", "src_encoding": "UTF-8", "text": "\nfrom base.oepnDriver import OpenDriver\n\n'''\n单例模式:设计思想\n类可以创建多少个对象 一个类可以创建很多对象\n单例 类只能创建具有相同内存地址的对象 一个:所有创建的对象的内存地址都是一个 id(对象1) == id(对象2)\n内存地址是一个\n应用场景:回收站,任务管理器,日志,数据库连接池 单例模式\n'''\nimport pageObject\nclass Driver:\n # 标记,用它来判断是否实例化过对象\n # 初始化,定义一下这个driver从来没有实例化过\n # 类属性 类方法是操作类属性的\n driver = None\n\n @classmethod\n def get_drvier(cls):\n ## 实例化driver对象 控制Chrome这个类只调用一次\n # 条件语句:判断一下driver属性是否被赋值过,即Chrome() 是否被实例化过\n if cls.driver is None:\n # 调用这个方法\n # cls.driver = webdriver.Chrome() # 核心\n cls.driver = OpenDriver().get_driver('谷歌') # 核心\n cls.driver.get(pageObject.url)\n cls.driver.maximize_window()\n return cls.driver\n @classmethod\n def close_driver(cls):\n # 为了程序的健壮性,需要判断下driver是否为None\n if cls.driver:\n cls.driver.quit()\n # 必须 置空的\n cls.driver = None # 我们浏览器对象关闭了 没有了\n\n\nif __name__ == '__main__':\n driver = Driver().get_drvier()\n print(id(driver))\n driver1 = Driver().get_drvier()\n print(id(driver1))\n Driver().close_driver()\n\n\n" }, { "alpha_fraction": 0.6659064888954163, "alphanum_fraction": 0.6670467257499695, "avg_line_length": 24.823530197143555, "blob_id": "0138f418679715b457e7959c4ffb7edbea3c14fa", 
"content_id": "0e4fcc419667eb0c77585a04ad72211e61545749", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1039, "license_type": "no_license", "max_line_length": 51, "num_lines": 34, "path": "/case/test_add_address.py", "repo_name": "mtx-testfan/0116", "src_encoding": "UTF-8", "text": "import time\n\nimport allure\n\nfrom base.baseApi import Base\nfrom base.driver import Driver\nfrom pageAction.addaddress_action import AddAddress\nfrom pageAction.login_action import Login\nfrom pageAction.order_action import Order\n\n@allure.feature('新增地址的测试')\nclass TestAddAddress:\n def setup_class(self):\n # 创建driver对象\n self.driver = Driver.get_drvier()\n # 依赖登录,调用成功登录的业务\n Login(self.driver).login_success()\n # 实例化一个Base对象\n self.base = Base(self.driver)\n # 创建新增地址的对象对象\n self.addaddress = AddAddress(self.driver)\n\n # 关闭driver\n def teardown_class(self):\n Driver.close_driver()\n\n # 测试用例:新增地址\n @allure.title('新增地址的正向测试用例')\n def test_add_address(self):\n # 调用增加地址成功的业务\n self.addaddress.addaddress_business()\n # 断言\n time.sleep(1)\n assert '新增成功' in self.base.base_page_source" }, { "alpha_fraction": 0.5965434908866882, "alphanum_fraction": 0.5977354049682617, "avg_line_length": 23.691177368164062, "blob_id": "eb1f80a543f27f384e368f542e579b8dc3aaa533", "content_id": "bde62b249471795610d1c75bb1bdf592ab0696c3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2074, "license_type": "no_license", "max_line_length": 53, "num_lines": 68, "path": "/pageAction/login_action.py", "repo_name": "mtx-testfan/0116", "src_encoding": "UTF-8", "text": "import time\n\nfrom pageAction.actions_manager import ActionsManager\nfrom pageObject.page_index import PageIndex\nfrom pageObject.page_login import PageLogin\n'''\n第一种方法:在登录这个类中进行两个页面的实例化\n第二种方法:多继承\n'''\nfrom base.driver import Driver\n\n# # 第二种方法\n# class Login(PageLogin,PageIndex):\n#\n# # 组合业务:登录成功的业务\n# def login_success(self):\n# # 点击首页的登录链接\n# self.click_login_link()\n# # 输入用户名\n# self.input_username('yaoyao')\n# # 输入密码\n# self.input_pwd('yaoyao')\n# # 点击登录\n# self.click_login_button()\n#\n# # 组合业务:登录的业务(参数化)\n# def login_business(self, username, password):\n# # 点击首页的登录链接\n# self.click_login_link()\n# # 输入用户名\n# self.input_username(username)\n# # 输入密码\n# self.input_pwd(password)\n# # 点击登录\n# self.click_login_button()\n\n#第一种方法\nclass Login(ActionsManager):\n # 继承ActionsManager,做到了页面对象类实例化的最大复用性\n\n # 组合业务:登录成功的业务\n def login_success(self):\n # 点击首页的登录链接\n self.pageindex.click_login_link()\n # 输入用户名\n self.pagelogin.input_username('yaoyao')\n # 输入密码\n self.pagelogin.input_pwd('yaoyao')\n # 点击登录\n self.pagelogin.click_login_button()\n time.sleep(3)\n\n # 组合业务:登录的业务(参数化)\n def login_business(self,username,password):\n # 点击首页的登录链接\n self.pageindex.click_login_link()\n # 输入用户名\n self.pagelogin.input_username(username)\n # 输入密码\n self.pagelogin.input_pwd(password)\n # 点击登录\n self.pagelogin.click_login_button()\n time.sleep(3)\n\n\nif __name__ == '__main__':\n driver = Driver.get_drvier()\n Login(driver).login_success()" }, { "alpha_fraction": 0.6370558142662048, "alphanum_fraction": 0.6383248567581177, "avg_line_length": 22.909090042114258, "blob_id": "3494712545610d270c24bff37608b8025707a350", "content_id": "3a0d05d7a7c2509c8fc059bdbc466aa05222f33a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 928, "license_type": "no_license", "max_line_length": 51, "num_lines": 33, "path": 
"/case/test_order.py", "repo_name": "mtx-testfan/0116", "src_encoding": "UTF-8", "text": "import time\n\nimport allure\n\nfrom base.baseApi import Base\nfrom base.driver import Driver\nfrom pageAction.login_action import Login\nfrom pageAction.order_action import Order\n\n@allure.feature('下订单功能的测试')\nclass TestOrder:\n def setup_class(self):\n # 创建driver对象\n self.driver = Driver.get_drvier()\n # 依赖登录,调用成功登录的业务\n Login(self.driver).login_success()\n # 实例化一个Base对象\n self.base = Base(self.driver)\n # 创建order对象\n self.order = Order(self.driver)\n\n # 关闭driver\n def teardown_class(self):\n Driver.close_driver()\n\n # 测试用例:下订单\n @allure.title('下订单的正向测试用例')\n def test_order(self):\n # 调用下订单的业务\n self.order.business_order()\n # 断言\n time.sleep(1)\n assert '提交成功' in self.base.base_page_source" }, { "alpha_fraction": 0.7546933889389038, "alphanum_fraction": 0.7546933889389038, "avg_line_length": 35.318180084228516, "blob_id": "97c0a85dbf8a18c9a5fbe0616498f6d234dd0556", "content_id": "bdee157833cb434662647d5afda81d79f5f6b48f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 873, "license_type": "no_license", "max_line_length": 57, "num_lines": 22, "path": "/pageAction/actions_manager.py", "repo_name": "mtx-testfan/0116", "src_encoding": "UTF-8", "text": "from pageObject.page_buy import PageBuy\nfrom pageObject.page_cart import PageCart\nfrom pageObject.page_goods_detail import PageGoodsDetail\nfrom pageObject.page_index import PageIndex\nfrom pageObject.page_login import PageLogin\nfrom pageObject.page_myaddress import PageMyAddress\nfrom pageObject.page_personcenter import PagePersonCenter\n\n\nclass ActionsManager:\n '''\n 管理页面对象的\n 有多少个页面对象,就实例化多少个,然后其他的业务就继承ActionsManager这个类\n '''\n def __init__(self, driver):\n self.pagelogin = PageLogin(driver)\n self.pageindex = PageIndex(driver)\n self.pagegoodsdetail = PageGoodsDetail(driver)\n self.pagebuy = PageBuy(driver)\n self.pagepersoncenter = PagePersonCenter(driver)\n self.pagemyaddress = PageMyAddress(driver)\n self.pagecart = PageCart(driver)\n" }, { "alpha_fraction": 0.5798192620277405, "alphanum_fraction": 0.5813252925872803, "avg_line_length": 14.7619047164917, "blob_id": "165f89744f775c64292c8ba427dd14f19cfe6454", "content_id": "f0a886b7d968ddb173ee3424a56742337f3b4cd3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 758, "license_type": "no_license", "max_line_length": 52, "num_lines": 42, "path": "/pageObject/page_buy.py", "repo_name": "mtx-testfan/0116", "src_encoding": "UTF-8", "text": "import allure\nfrom selenium.webdriver.common.by import By\n\nfrom base.baseApi import Base\nimport time\nimport pageObject\n\nclass PageBuy(Base):\n\n # 点击支付方式\n @allure.step('点击支付方式')\n def click_payment(self):\n self.base_click(pageObject.loc_payment)\n\n # 点击提交订单\n @allure.step('提交订单')\n def click_submit_order(self):\n self.base_click(pageObject.loc_submit_order)\n\n\n\n\n\n\n\n\n\n\n\n\n\n # # 组合页面\n # def login_business(self):\n # # 点击登录链接\n # self.click_login_link()\n # # 输入用户名\n # self.input_username()\n # # 输入密码\n # self.input_pwd()\n # # 点击登录按钮\n # self.click_login()\n # time.sleep(2)\n\n\n" }, { "alpha_fraction": 0.6349378228187561, "alphanum_fraction": 0.6468361020088196, "avg_line_length": 26.16176414489746, "blob_id": "6db51346f8aa85aeac961f29671b001c69632087", "content_id": "c719c3d397bf689db9ae1f0cf900e7b92130ec16", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2105, 
"license_type": "no_license", "max_line_length": 75, "num_lines": 68, "path": "/pageObject/page_myaddress.py", "repo_name": "mtx-testfan/0116", "src_encoding": "UTF-8", "text": "import allure\nfrom selenium.webdriver.common.by import By\n\nfrom base.baseApi import Base\nimport time\nimport pageObject\n\nclass PageMyAddress(Base):\n\n # 输入用户名\n @allure.step('点击新增地址的按钮')\n def click_new_address(self):\n self.base_click(pageObject.myaddress_add_address)\n\n # 通过index 从0开始 目标iframe是index=2第三个iframe\n @allure.step('切换iframe-编辑我的新地址页面')\n def switch_to_iframe(self):\n # 通过iframe的索引值进行定位\n # self.driver.switch_to.frame(2)\n self.base_switch_iframe(2)\n\n #\n @allure.step('输入用户名')\n def input_name(self):\n\n self.base_input(pageObject.myaddress_username,'zuocheng')\n # 输入电话\n @allure.step('输入电话')\n def input_tel(self):\n self.base_input(pageObject.myaddress_tel,'13636751740')\n\n @allure.step('选择省份')\n def select_prov(self):\n # js-select 显示\n self.base_js(pageObject.myaddress_js_prov)\n # select 省 选择北京\n self.base_select_visible_text(pageObject.myaddress_prov,'北京市')\n time.sleep(2)\n\n @allure.step('选择城市')\n def select_city(self):\n self.base_js(pageObject.myaddress_js_city)\n # select 省 选择北京\n self.base_select_visible_text(pageObject.myaddress_city, '西城区')\n time.sleep(2)\n\n @allure.step('选择区县')\n def select_country(self):\n self.base_js(pageObject.myaddress_js_country)\n # select 省 选择北京\n self.base_select_visible_text(pageObject.myaddress_country, '月坛街道')\n time.sleep(2)\n\n # 详细地址\n @allure.step('输入详细地址')\n def input_detail_address(self):\n self.base_input(pageObject.myaddress_detailaddress,'北京市西城区')\n\n # 别名\n @allure.step('输入别名')\n def address_nickname(self):\n self.base_input(pageObject.myaddress_nickname,'0113')\n\n\n # 点击保存\n @allure.step('点击保存')\n def address_save(self):\n self.base_click(pageObject.myaddress_save)\n\n\n" }, { "alpha_fraction": 0.6406965851783752, "alphanum_fraction": 0.6471127271652222, "avg_line_length": 23.68181800842285, "blob_id": "385dc5501202f0290b4b3f7d373d2ebf28777d73", "content_id": "d684353e3777f76b6c4c3dce2463f1956ca1b2b1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1427, "license_type": "no_license", "max_line_length": 63, "num_lines": 44, "path": "/case/test_login.py", "repo_name": "mtx-testfan/0116", "src_encoding": "UTF-8", "text": "import time\n\nimport pytest\n\nfrom base.driver import Driver\nfrom pageAction.login_action import Login\nfrom tool.readData import ReadData\nfrom base.baseApi import Base\ndata = ReadData().get_yaml('login_data','test_login')\n'''\n登录用例的参数化数据如何组织?如何断言\n1.前面是登录失败的,后面一条是登录成功的(推荐写法),前提条件:setup_class,teardown_class\n2.没有顺序的,失败和成功的交叉去写 \n2.1 判断:如果成功了,那就先退出,判断是否退出成功,退出成功,再继续参数化 \n前提条件 类级别的装置函数(setup_class,teardown_class)\n2.2 前提条件:装置函数可以用方法级别\n\n'''\nclass TestLogin:\n def setup_class(self):\n '''\n 初始化chrome对象\n :return:\n '''\n # 创建driver对象\n self.driver = Driver.get_drvier()\n # 创建login的业务对象\n self.login = Login(self.driver)\n # 创建base对象,调用page_source的方法\n self.base = Base(self.driver)\n\n @pytest.mark.parametrize('args',data)\n def test_login(self,args):\n self.login.login_business(args['accounts'],args['pwd'])\n time.sleep(2)\n assert args['assert'] in self.base.base_page_source\n\n\n def teardown_class(self):\n '''\n 所有测试用例执行完毕关闭浏览器\n :return:\n '''\n Driver.close_driver()\n\n\n\n\n\n" }, { "alpha_fraction": 0.6239554286003113, "alphanum_fraction": 0.6392757892608643, "avg_line_length": 23.758621215820312, "blob_id": "2ebe9fc3c6b54a1035d0809abb28ff8c0e9dfc8e", 
"content_id": "e6ab618ae651aa5f4e4870b7998470e4a79aca83", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1868, "license_type": "no_license", "max_line_length": 75, "num_lines": 58, "path": "/case/test_cart.py", "repo_name": "mtx-testfan/0116", "src_encoding": "UTF-8", "text": "import time\n\nimport allure\n\nimport pageObject\nfrom base.baseApi import Base\nfrom base.driver import Driver\nfrom pageAction.cart_action import Cart\nfrom pageAction.login_action import Login\n\n'''\ncase\n测试用例(N)\n1.起始动作是在哪个页面----\n原则:1.保证起始动作在相同的页面---相同的页面就是首页\n 2.目的回到首页\n 2.1 加入购物车测试用例(前一个测试用例)结束的时候 点击index标签---跳转到首页\n todo: 销毁函数 teardown_method()\n 2.2 删除购物车测试用例(后一个测试用例)开始的时候,get(http://121.42.15.146:9090/mtx/)---打开首页\n todo: 初始化函数 setup_method()\n\n'''\n@allure.feature('购物车功能的测试')\nclass TestCart:\n def setup_class(self):\n # 创建driver对象\n self.driver = Driver.get_drvier()\n # 依赖登录,调用成功登录的业务\n Login(self.driver).login_success()\n # 实例化一个Base对象\n self.base = Base(self.driver)\n # 创建Cart对象\n self.cart = Cart(self.driver)\n\n # 关闭driver\n def teardown_class(self):\n Driver.close_driver()\n\n # # 第一种办法 销毁函数---点击index首页\n # def teardown_method(self):\n # self.base.base_click_index()\n #第二种方法 初始化函数---get(url)\n def setup_method(self):\n self.driver.get(pageObject.url)\n\n # 测试用例:添加购物车\n @allure.title('添加购物车的测试用例')\n def test_add(self):\n self.cart.business_add_cart()\n time.sleep(3)\n assert '加入成功' in self.base.base_page_source\n\n # 删除购物车\n @allure.title('删除购物车的测试用例')\n def test_delete(self):\n self.cart.business_delete_cart()\n time.sleep(3)\n assert \"删除成功\" in self.base.base_page_source\n" }, { "alpha_fraction": 0.6576728224754333, "alphanum_fraction": 0.6610455513000488, "avg_line_length": 21.846153259277344, "blob_id": "6eb43299e786e79b5e2e7dbfc1c30f9a3d2b4e64", "content_id": "ed33b5b4e711ece49874556757ae73086e56bce4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 665, "license_type": "no_license", "max_line_length": 53, "num_lines": 26, "path": "/pageAction/order_action.py", "repo_name": "mtx-testfan/0116", "src_encoding": "UTF-8", "text": "import time\n\nfrom pageAction.actions_manager import ActionsManager\nfrom pageObject.page_index import PageIndex\n\n'''\n组合业务\n引入页面对象包里面的步骤\n继承\n'''\nclass Order(ActionsManager):\n # 组合业务:提交订单,并提示提交成功(断言)\n def business_order(self):\n self.pageindex.click_zk_skirt()\n self.pageindex.switch_window()\n self.pagegoodsdetail.click_pink()\n time.sleep(1)\n self.pagegoodsdetail.click_M()\n time.sleep(1)\n self.pagegoodsdetail.click_now_buy()\n self.pagebuy.click_payment()\n self.pagebuy.click_submit_order()\n\n\nif __name__ == '__main__':\n pass" }, { "alpha_fraction": 0.6162570714950562, "alphanum_fraction": 0.6219281554222107, "avg_line_length": 23.627906799316406, "blob_id": "6ca9670ef589cd84648a76d3fd96c297009af3b4", "content_id": "52235743ffea94775f1c88f2c32161d360ad264a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1390, "license_type": "no_license", "max_line_length": 66, "num_lines": 43, "path": "/pageAction/cart_action.py", "repo_name": "mtx-testfan/0116", "src_encoding": "UTF-8", "text": "import time\n\nfrom pageAction.actions_manager import ActionsManager\nfrom pageObject.page_index import PageIndex\n\n'''\n组合业务\n引入页面对象包里面的步骤\n继承\n'''\nclass Cart(ActionsManager):\n # 组合业务:添加购物车\n def business_add_cart(self):\n self.pageindex.click_zk_skirt()\n self.pageindex.switch_window()\n 
self.pagegoodsdetail.click_pink()\n time.sleep(2)\n self.pagegoodsdetail.click_M()\n time.sleep(2)\n self.pagegoodsdetail.click_add_cart()\n\n # 组合业务:删除购物车\n def business_delete_cart(self):\n '''\n 功能测试 手动如何操作 -----自动化动作就如何操作\n 删除商品之前我们要确保购物车里面有商品\n (推荐) 1. action层,先调用添加购物车这个业务(确保购物车里面有商品),再调用删除业务\n 1.1 动作能关联上\n 2. case层,调整顺序,插件pytest-ordering (不推荐:测试用例和测试用例之间是相互独立,不依赖)\n :return:\n '''\n # 调用添加业务\n self.business_add_cart()\n # 调用删除业务\n self.pagegoodsdetail.click_right_cart()\n self.pagecart.click_delete_button()\n self.pagecart.click_confirm_delete()\n #点击首页\n def click_index(self):\n pass\n\nif __name__ == '__main__':\n pass" } ]
18
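A note on the test-design choice documented in cart_action.py above: the delete-cart flow calls the add-cart flow first, so every test case stays independent instead of relying on pytest-ordering. A minimal sketch of that action-layer composition; `PageIndex`/`PageCart` here are hypothetical stand-ins, since the repo's real locators and Base helpers are not reproduced:

```python
# Sketch only: the layering pattern is the point, not the page details.

class PageIndex:
    def __init__(self, driver):
        self.driver = driver

    def add_first_item_to_cart(self):
        ...  # click product, pick options, click "add to cart"


class PageCart:
    def __init__(self, driver):
        self.driver = driver

    def delete_first_item(self):
        ...  # click delete, confirm


class CartActions:
    """Business layer: every test-facing flow sets up its own preconditions."""

    def __init__(self, driver):
        self.index = PageIndex(driver)
        self.cart = PageCart(driver)

    def business_add_cart(self):
        self.index.add_first_item_to_cart()

    def business_delete_cart(self):
        # Guarantee the cart is non-empty by reusing the add flow,
        # rather than ordering test cases against each other.
        self.business_add_cart()
        self.cart.delete_first_item()
```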
MrsKamran/djangoPlants
https://github.com/MrsKamran/djangoPlants
e3fb7943d17ee72b8aaecb1400558df28e981260
4e7124ef3102fac6ee88cf1eacd506d736d8d5db
1a68e2eaebbdcddca9128dde9b431d997c9fb1c8
refs/heads/main
2023-01-13T22:43:07.265293
2020-11-20T17:02:08
2020-11-20T17:02:08
310,699,312
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6861826777458191, "alphanum_fraction": 0.6861826777458191, "avg_line_length": 60.0476188659668, "blob_id": "127dffe27987cd3e92ba7fb1461551b46068088c", "content_id": "1f32f63e78a95691dce56f4af0ce3aa3d18a2343", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1281, "license_type": "no_license", "max_line_length": 118, "num_lines": 21, "path": "/plants/urls.py", "repo_name": "MrsKamran/djangoPlants", "src_encoding": "UTF-8", "text": "from django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n path('', views.home, name='home'),\n path('about/', views.about, name='about'),\n path('plants/', views.plants_index, name='index'),\n path('plants/<int:plant_id>', views.plants_detail, name='detail'),\n path('plants/create/', views.PlantCreate.as_view(), name='plants_create' ),\n path('plants/<int:pk>/update/', views.PlantUpdate.as_view(), name='plants_update' ),\n path('plants/<int:pk>/delete/', views.PlantDelete.as_view(), name='plants_delete' ),\n path('plants/<int:plant_id>/add_watering/', views.add_watering, name='add_watering'),\n path('accessories/', views.AccessoryList.as_view(), name='accessories_index'),\n path('accessories/<int:pk>/', views.AccessoryDetail.as_view(), name='accessories_detail'),\n path('accessories/create/', views.AccessoryCreate.as_view(), name='accessories_create'),\n path('accessories/<int:pk>/update/', views.AccessoryUpdate.as_view(), name='accessories_update'),\n path('accessories/<int:pk>/delete/', views.AccessoryDelete.as_view(), name='accessories_delete'),\n path('plants/<int:plant_id>/assoc_accessory/<int:accessory_id>/', views.assoc_accessory, name='assoc_accessory') ,\n path('accounts/signup/', views.signup, name='signup'),\n]" }, { "alpha_fraction": 0.6722689270973206, "alphanum_fraction": 0.6834734082221985, "avg_line_length": 25.407407760620117, "blob_id": "143a4ea3af171099b8053596bff67ab0e02184fc", "content_id": "7ffcb67fa87f3d657124850d9932f1dfb1f4dce8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1428, "license_type": "no_license", "max_line_length": 91, "num_lines": 54, "path": "/plants/models.py", "repo_name": "MrsKamran/djangoPlants", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.urls import reverse\nfrom datetime import date\nfrom django.contrib.auth.models import User\n# Create your models here.\n\nFERTILIZERS =(\n ('O', 'Once a Month'),\n ('T', 'Twice a Month')\n)\nWATERS =(\n ('O', 'Once a Week'),\n ('T', 'Twice a Week')\n) \nclass Accessory(models.Model):\n name = models.CharField(max_length=50)\n color = models.CharField(max_length=20)\n\n def __str__(self):\n return self.name\n\n def get_absolute_url(self):\n return reverse('accessories_detail', kwargs={'pk': self.id})\n\nclass Plant(models.Model):\n name = models.CharField(max_length=100)\n care_level = models.CharField(max_length=100)\n description = models.TextField()\n age = models.IntegerField()\n accessories = models.ManyToManyField(Accessory)\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.name\n\n def get_absolute_url(self):\n return reverse('detail', kwargs={'plant_id':self.id})\n\nclass Watering(models.Model):\n date = models.DateField('Watering Date')\n fertilizer = models.CharField(\n max_length=1,\n choices = FERTILIZERS,\n default = FERTILIZERS[0][0]\n )\n water = models.CharField(\n max_length=1,\n choices=WATERS,\n default = WATERS[0][0]\n )\n plant = 
models.ForeignKey(Plant, on_delete=models.CASCADE)\n \n def __str__(self):\n return f\"{self.get_fertilizer_display()} and {self.get_water_display()} on {self.date}\"\n\n\n" }, { "alpha_fraction": 0.523809552192688, "alphanum_fraction": 0.5566502213478088, "avg_line_length": 25.478260040283203, "blob_id": "3861847be45190c29b8bd2c3483c7ad9075dfaa1", "content_id": "84f0b1e16672cb274b070d902e381f23f6823501", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 609, "license_type": "no_license", "max_line_length": 117, "num_lines": 23, "path": "/plants/migrations/0003_auto_20201110_2141.py", "repo_name": "MrsKamran/djangoPlants", "src_encoding": "UTF-8", "text": "# Generated by Django 3.1.1 on 2020-11-10 21:41\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('plants', '0002_watering'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='watering',\n name='water',\n field=models.CharField(choices=[('O', 'Once a Week'), ('T', 'Twice a Week')], default='O', max_length=1),\n ),\n migrations.AlterField(\n model_name='watering',\n name='date',\n field=models.DateField(verbose_name='Watering Date'),\n ),\n ]\n" }, { "alpha_fraction": 0.5080645084381104, "alphanum_fraction": 0.5551075339317322, "avg_line_length": 27.615385055541992, "blob_id": "9cfc574dba81938ac72e7f5a919c7cf6f578daca", "content_id": "44574e294b1f562e85b897cb0873c0e0e4eb12a3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 744, "license_type": "no_license", "max_line_length": 114, "num_lines": 26, "path": "/plants/migrations/0004_auto_20201111_1913.py", "repo_name": "MrsKamran/djangoPlants", "src_encoding": "UTF-8", "text": "# Generated by Django 3.1.1 on 2020-11-11 19:13\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('plants', '0003_auto_20201110_2141'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Accessory',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=50)),\n ('color', models.CharField(max_length=20)),\n ],\n ),\n migrations.AddField(\n model_name='plant',\n name='accessories',\n field=models.ManyToManyField(to='plants.Accessory'),\n ),\n ]\n" }, { "alpha_fraction": 0.5514993667602539, "alphanum_fraction": 0.5775749683380127, "avg_line_length": 32.34782791137695, "blob_id": "f70ceff86411b1b9bda69ba4964405d87848c6f8", "content_id": "f408e1e1b39710ca60e3c026b0b8772d0cfb8d81", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 767, "license_type": "no_license", "max_line_length": 133, "num_lines": 23, "path": "/plants/migrations/0002_watering.py", "repo_name": "MrsKamran/djangoPlants", "src_encoding": "UTF-8", "text": "# Generated by Django 3.1.1 on 2020-11-10 20:47\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('plants', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Watering',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('date', models.DateField()),\n ('fertilizer', models.CharField(choices=[('O', 'Once a Month'), ('T', 'Twice a Month')], default='O', max_length=1)),\n ('plant', 
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='plants.plant')),\n ],\n ),\n ]\n" }, { "alpha_fraction": 0.8131313323974609, "alphanum_fraction": 0.8131313323974609, "avg_line_length": 27.285715103149414, "blob_id": "20545e45d8cb2eae797150f9d3e18188029fcfd5", "content_id": "cdee36544ad100647c8093f7e4864c1efed2a50d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 198, "license_type": "no_license", "max_line_length": 46, "num_lines": 7, "path": "/plants/admin.py", "repo_name": "MrsKamran/djangoPlants", "src_encoding": "UTF-8", "text": "from django.contrib import admin\n\n# Register your models here.\nfrom .models import Plant, Watering, Accessory\nadmin.site.register(Plant)\nadmin.site.register(Watering)\nadmin.site.register(Accessory)\n" } ]
6
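The Watering model in the record above stores single-letter codes and renders them through Django's generated get_fertilizer_display()/get_water_display() helpers. Under the hood that display lookup is just the choices tuples read as a mapping; a small plain-Python sketch of the idea (no Django setup required):

```python
# The choices tuples from models.py, reused verbatim.
FERTILIZERS = (
    ('O', 'Once a Month'),
    ('T', 'Twice a Month'),
)

def display(choices, code):
    # Equivalent of what get_<field>_display() resolves for a stored code.
    return dict(choices).get(code, code)

assert display(FERTILIZERS, 'O') == 'Once a Month'
assert display(FERTILIZERS, 'T') == 'Twice a Month'
```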
praython/agiliq-building_api_django
https://github.com/praython/agiliq-building_api_django
223cb4dfde0035514858f0c8d64fc3c12f1ebb99
246b4654856a3f7b43bf9f923e7516fef9609949
78de58b7575557aeccf1bbfe3247e1d800b933be
refs/heads/master
2020-07-09T15:22:48.555979
2019-08-24T15:21:41
2019-08-24T15:21:41
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.49857550859451294, "alphanum_fraction": 0.5527065396308899, "avg_line_length": 18.5, "blob_id": "f92441b3f57c33f23d889475c2f62493b6b12f83", "content_id": "b964bc55129e85056b536897d8d476a42d600639", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 351, "license_type": "no_license", "max_line_length": 47, "num_lines": 18, "path": "/polls/migrations/0002_auto_20190823_1715.py", "repo_name": "praython/agiliq-building_api_django", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2.4 on 2019-08-23 17:15\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('polls', '0001_initial'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='poll',\n old_name='pub_dat',\n new_name='pub_date',\n ),\n ]\n" }, { "alpha_fraction": 0.7888888716697693, "alphanum_fraction": 0.7888888716697693, "avg_line_length": 44, "blob_id": "2b6ae4a1e717ca22b24219f7405f519dfac4299f", "content_id": "14d6bf587818596fc80b6faea5eb7931fb21373b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 90, "license_type": "no_license", "max_line_length": 60, "num_lines": 2, "path": "/README.md", "repo_name": "praython/agiliq-building_api_django", "src_encoding": "UTF-8", "text": "# agiliq-building_api_django\nBuilding API Django from https://books.agiliq.com/en/latest/\n" } ]
2
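The migration in this record renames pub_dat to pub_date. For context, Django emits a RenameField operation like that when makemigrations detects a field rename on the model; a sketch of the corresponding model edit (the Poll model itself is not in the record, so its shape here is an assumption):

```python
# Hypothetical Poll model whose rename would generate the migration above.
from django.db import models

class Poll(models.Model):
    question = models.CharField(max_length=200)
    # was: pub_dat = models.DateTimeField()
    pub_date = models.DateTimeField()
```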
zbcjackson/mombaby
https://github.com/zbcjackson/mombaby
a27cdf17da3240b5503f4f3d1dab0788025dce13
18f26d25c1ab3454a04a98d66d5a2cdffa0ad6c4
6353859e1a8f5d5faa965425d0d4058a4d842f50
refs/heads/master
2020-04-19T08:09:03.317629
2016-08-29T07:57:12
2016-08-29T07:57:12
66,823,697
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5486486554145813, "alphanum_fraction": 0.6135135293006897, "avg_line_length": 29.91666603088379, "blob_id": "0973ba48286b664ab6dd17bf57dcd3e92d12dcac", "content_id": "d99e0b41713c63d220728a8f465ba2eaff324ce8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 386, "license_type": "no_license", "max_line_length": 123, "num_lines": 12, "path": "/db.py", "repo_name": "zbcjackson/mombaby", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nimport MySQLdb\n\ndef save():\n conn = MySQLdb.connect(user='ttq', passwd='ttq', db='mombaby', host='192.168.59.103', charset=\"utf8\", use_unicode=True)\n cursor = conn.cursor()\n cursor.execute(\"\"\"INSERT INTO questions(question, answers) VALUES (%s, %s)\"\"\", (\"求开奶师\", \"电话号码13912348903\"))\n conn.commit()\n\nif __name__ == \"__main__\":\n save()" }, { "alpha_fraction": 0.5963302850723267, "alphanum_fraction": 0.5986238718032837, "avg_line_length": 24.705883026123047, "blob_id": "0f32a11c5f8d84a81f664782ac7b4d489c2b18bc", "content_id": "489f2a19a506b0a8a14ba94e1275dda14d9597ff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 448, "license_type": "no_license", "max_line_length": 53, "num_lines": 17, "path": "/send_mail.py", "repo_name": "zbcjackson/mombaby", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom email.mime.text import MIMEText\nfrom email.header import Header\nfrom subprocess import Popen, PIPE\n\ndef send():\n print \"get email info...\"\n msg = MIMEText('TEst')\n msg[\"From\"] = 'zbcjackson@gmail.com'\n msg[\"To\"] = 'zbcjackson@odd-e.com'\n msg[\"Subject\"] = '[母婴问答]' + '\\n求助'\n p = Popen([\"/usr/sbin/sendmail\", \"-t\"], stdin=PIPE)\n res = p.communicate(msg.as_string())\n print 'mail sended ...'\n\nif __name__ == \"__main__\":\n send()" }, { "alpha_fraction": 0.7767441868782043, "alphanum_fraction": 0.7829457521438599, "avg_line_length": 29.714284896850586, "blob_id": "5c3a87dde8b8a4c54e36c71af865ed176f41f9e9", "content_id": "4f46d7089d3e442004427af563a40e736421aaaf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 645, "license_type": "no_license", "max_line_length": 82, "num_lines": 21, "path": "/main.py", "repo_name": "zbcjackson/mombaby", "src_encoding": "UTF-8", "text": "from twisted.internet import reactor\nfrom twisted.internet import task\nfrom scrapy.crawler import Crawler\nfrom scrapy import log, signals\nfrom mombaby.spiders.babytree import BabytreeSpider\nfrom scrapy.utils.project import get_project_settings\n\ndef crawl():\n spider = BabytreeSpider()\n settings = get_project_settings()\n crawler = Crawler(settings)\n # crawler.signals.connect(reactor.stop, signal=signals.spider_closed)\n crawler.configure()\n crawler.crawl(spider)\n crawler.start()\n\nl = task.LoopingCall(crawl)\nl.start(3600) # call every hour\n\nlog.start()\nreactor.run() # the script will block here until the spider_closed signal was sent\n" }, { "alpha_fraction": 0.719298243522644, "alphanum_fraction": 0.7368420958518982, "avg_line_length": 21.799999237060547, "blob_id": "3b44aab352cbfbf874cd6e97fb2a9d6bf666d517", "content_id": "d8a5491d29fd96fa7937126c290df60e620b96ae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 114, "license_type": "no_license", "max_line_length": 35, "num_lines": 5, "path": "/Dockerfile", "repo_name": "zbcjackson/mombaby", "src_encoding": "UTF-8", 
"text": "FROM python:2.7\n# COPY pip.conf /root/.pip/pip.conf\nADD . /code\nWORKDIR /code\nRUN pip install -r requirements.txt\n" }, { "alpha_fraction": 0.6303063035011292, "alphanum_fraction": 0.6351423859596252, "avg_line_length": 47.97368240356445, "blob_id": "235d539b63d55906e37330e6fb508774967e4593", "content_id": "07b03ed826f3852ebe4ae954ecdd5155d82be2e1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1905, "license_type": "no_license", "max_line_length": 158, "num_lines": 38, "path": "/mombaby/spiders/babytree.py", "repo_name": "zbcjackson/mombaby", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport scrapy\n\nfrom scrapy.contrib.spiders import CrawlSpider, Rule\nfrom scrapy.contrib.linkextractors import LinkExtractor\nfrom datetime import datetime\n\n\nfrom mombaby.items import QuestionItem\n\nclass BabytreeSpider(CrawlSpider):\n name = \"babytree\"\n allowed_domains = [\"babytree.com\"]\n start_urls = ['http://www.babytree.com/s.php?q=%s&c=&range=d' % keyword for keyword in ['开奶', '催奶', '催乳', '奶结', '追奶', '乳腺', '肿胀', '通乳', '通奶', '催奶', '回奶']]\n rules =[\n Rule(LinkExtractor(allow=('/ask/detail/'), restrict_xpaths=('//div[@class=\"search_col_2\"]')), callback='parse_question'),\n # Rule(LinkExtractor(allow=('/community/.+\\.html'), restrict_xpaths=('//div[@class=\"search_col_2\"]')), callback='parse_community'),\n Rule(LinkExtractor(allow=('c=community'), restrict_xpaths=('//ul[@class=\"search_tab\"]')))\n # Rule(LinkExtractor(allow=('pg=\\\\d+'), restrict_xpaths=('//div[@class=\"pagejump\"]')))\n ]\n\n def parse_question(self, response):\n question = QuestionItem()\n question['question'] = response.css('h1::text').extract()[0]\n question['question_time'] = response.css('div.qa-contributor abbr::text').extract()[0]\n question['answers'] = '\\n'.join(response.css('div.answer-text::text,ul.answer-comments li::text').extract())\n question['last_updated'] = datetime.now()\n question['url'] = response.url\n return question\n\n def parse_community(self, response):\n question = QuestionItem()\n question['question'] = response.css('h1::text').extract()[0].strip()\n # question['question_time'] = response.css('div.postTime::text').extract()[0]\n question['answers'] = '\\n'.join(response.css('div.postContent').extract())\n question['last_updated'] = datetime.now()\n question['url'] = response.url\n return question\n" }, { "alpha_fraction": 0.6301295757293701, "alphanum_fraction": 0.6393088698387146, "avg_line_length": 33.27777862548828, "blob_id": "9304227d6d7d341aa8b165244384e9f324d0920b", "content_id": "febcd959d6c246a1487d20deedf6858621726839", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1860, "license_type": "no_license", "max_line_length": 128, "num_lines": 54, "path": "/mombaby/pipelines.py", "repo_name": "zbcjackson/mombaby", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html\nfrom scrapy.contrib.exporter import JsonItemExporter\n\nfrom email.mime.text import MIMEText\nfrom email.header import Header\nfrom subprocess import Popen, PIPE\nimport MySQLdb\n\nclass JsonExportPipeline(object):\n def __init__(self):\n self.file = open('babytree.json', 'w+b')\n\n def open_spider(self, spider):\n self.exporter = JsonItemExporter(self.file, ensure_ascii=False)\n 
self.exporter.start_exporting();\n\n def close_spider(self, spider):\n self.exporter.finish_exporting()\n self.file.close()\n\n def process_item(self, item, spider):\n self.exporter.export_item(item)\n return item\n\nclass EmailPipeline(object):\n def process_item(self, item, spider):\n print \"get email info...\"\n msg = MIMEText(item['question'] + '<br/>' + item['url'] + '<br/>' + item['answers'], 'html', 'utf-8')\n msg[\"From\"] = 'zbcjackson@gmail.com'\n msg[\"To\"] = 'zbcjackson@gmail.com;ace0918@126.com;qinwen.shi@gmail.com;tengzhenyu@gmail.com'\n msg[\"Subject\"] = Header(u\"[母婴问答]\" + item['question'], 'utf-8')\n p = Popen([\"/usr/sbin/sendmail\", \"-t\"], stdin=PIPE)\n res = p.communicate(msg.as_string())\n print 'mail sended ...'\n\n return item\n\nclass MySQLPipeline(object):\n def __init__(self):\n self.conn = MySQLdb.connect(user='ttq', passwd='ttq', db='mombaby', host='192.168.59.103', charset=\"utf8\", use_unicode=True)\n self.cursor = self.conn.cursor()\n\n def process_item(self, item, spider):\n try:\n self.cursor.execute(\"\"\"INSERT INTO questions(question, answers) VALUES (%s, %s)\"\"\", (item['question'], item['answers']))\n self.conn.commit()\n except MySQLdb.Error, e:\n print \"Error %d: %s\" % (e.args[0], e.args[1])\n return item\n\n" }, { "alpha_fraction": 0.6895424723625183, "alphanum_fraction": 0.6928104758262634, "avg_line_length": 23.479999542236328, "blob_id": "d9dcfdb3f2157c5bf848f00fbae98980f53610b2", "content_id": "e3b917aeefbc75cc081d399589cfd6976d9fcbb0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 612, "license_type": "no_license", "max_line_length": 55, "num_lines": 25, "path": "/mombaby/items.py", "repo_name": "zbcjackson/mombaby", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# Define here the models for your scraped items\n#\n# See documentation in:\n# http://doc.scrapy.org/en/latest/topics/items.html\n\nimport scrapy\n\nfrom scrapy.contrib.loader.processor import Join\n\ndef encoding(value):\n return value.encode('utf-8').decode(\"unicode_escape\")\n\ndef join_multiline(value):\n return '\\n'.join(value)\n\nclass QuestionItem(scrapy.Item):\n # define the fields for your item here like:\n # name = scrapy.Field()\n question = scrapy.Field()\n question_time = scrapy.Field()\n answers = scrapy.Field()\n last_updated = scrapy.Field()\n url = scrapy.Field()\n" } ]
7
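Both db.py and MySQLPipeline above pass scraped text to MySQLdb through %s placeholders so the driver escapes the values. A minimal standalone sketch of that pattern; the connection settings here are placeholders, not the repository's real credentials:

```python
import MySQLdb  # same Python 2-era driver the pipeline uses

def save_question(question, answers):
    # Placeholder credentials; substitute your own.
    conn = MySQLdb.connect(user='user', passwd='secret', db='mombaby',
                           host='localhost', charset='utf8', use_unicode=True)
    cursor = conn.cursor()
    # %s placeholders make the driver escape the scraped strings;
    # never build this SQL by concatenating crawled input.
    cursor.execute(
        "INSERT INTO questions(question, answers) VALUES (%s, %s)",
        (question, answers),
    )
    conn.commit()
    conn.close()
```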
objectc/smart_remote
https://github.com/objectc/smart_remote
3307133694ab05d5f6658e32ed98ed5f127872ba
86a71c76fd04461e3071b285eb3683f643902aaf
cf5135c236651abcc84526cb6d8041c452713703
refs/heads/master
2020-04-02T16:24:55.092208
2019-10-20T03:36:27
2019-10-20T03:36:27
154,611,263
1
1
null
null
null
null
null
[ { "alpha_fraction": 0.5609177350997925, "alphanum_fraction": 0.5632911324501038, "avg_line_length": 24.795917510986328, "blob_id": "b6f8c76417b16d46b08f2f8fe1f9be9e1a8ebffe", "content_id": "f3954504bc2e7321a89fab41066e862ee0ed5538", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1264, "license_type": "permissive", "max_line_length": 71, "num_lines": 49, "path": "/alexa.py", "repo_name": "objectc/smart_remote", "src_encoding": "UTF-8", "text": "import logging\nimport os\n\nfrom flask import Flask\nfrom flask_ask import Ask, request, session, question, statement\n\napp = Flask(__name__)\nask = Ask(app, '/')\nlogging.getLogger('flask_ask').setLevel(logging.DEBUG)\n\nCOMMANDS = {\n 'power': 'POWER', \n 'cool': 'COOL', \n 'speed up': 'SPEED_UP', \n 'speed down':'SPEED_DOWN', \n 'temperature up': 'TEMP_UP',\n 'temperature down': 'TEMP_DOWN', \n 'timer': 'TIMER', \n 'rotate': 'ROTATE', \n 'narrow':'NARROW', \n 'wide':'WIDE'\n }\n\n@ask.launch\ndef launch():\n pass\n\n\n@ask.intent('RemoteIntent', mapping = {'command': 'command'})\ndef remote(command, room):\n if command in COMMANDS.keys():\n command_str = 'irsend send_once Dyson ' + COMMANDS[command]\n os.system(command_str)\n return statement('dyson '+command)\n else:\n return statement('no command'+command)\n\n\n@ask.session_ended\ndef session_ended():\n return \"{}\", 200\n\n\nif __name__ == '__main__':\n if 'ASK_VERIFY_REQUESTS' in os.environ:\n verify = str(os.environ.get('ASK_VERIFY_REQUESTS', '')).lower()\n if verify == 'false':\n app.config['ASK_VERIFY_REQUESTS'] = False\n app.run(debug=True)\n" }, { "alpha_fraction": 0.5916334390640259, "alphanum_fraction": 0.5996015667915344, "avg_line_length": 23.487804412841797, "blob_id": "19ad97405b10cbd0e214868d3b738bf34e99e359", "content_id": "aedc647aff36e47ca57f42620fd2dd1123b74bc0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1004, "license_type": "permissive", "max_line_length": 71, "num_lines": 41, "path": "/server.py", "repo_name": "objectc/smart_remote", "src_encoding": "UTF-8", "text": "import logging\nimport os\nfrom flask import Flask\nfrom flask import request\nfrom flask import make_response\nfrom flask import abort, jsonify\n\nfrom flask import Flask\n\napp = Flask(__name__)\nlogging.getLogger('flask_ask').setLevel(logging.DEBUG)\n\nCOMMANDS = {\n 'POWER': 'POWER',\n 'COOL': 'COOL',\n 'SPEED_UP': 'SPEED_UP',\n 'SPEED_DOWN': 'SPEED_DOWN',\n 'TEMP_UP': 'TEMP_UP',\n 'TEMP_DOWN': 'TEMP_DOWN',\n 'TIMER': 'TIMER',\n 'ROTATE': 'ROTATE',\n 'NARROW': 'NARROW',\n 'WIDE': 'WIDE'\n}\n\n\n@app.route(\"/dyson\", methods=['GET'])\ndef getDyson():\n command = request.args.get('command')\n if command:\n if command in COMMANDS.keys():\n command_str = 'irsend send_once Dyson ' + COMMANDS[command]\n os.system(command_str)\n resData = {'msg': 'success'}\n response = jsonify(resData)\n return response\n return jsonify({'msg': 'no commands found'})\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=5000, debug=True)\n" }, { "alpha_fraction": 0.6787564754486084, "alphanum_fraction": 0.7046632170677185, "avg_line_length": 20.44444465637207, "blob_id": "7d1720501702ffc6f8baf22c9abbf4bded599b7c", "content_id": "3e3b5a0060b6695e5b637eb4ca05f43a324f88cf", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 193, "license_type": "permissive", "max_line_length": 49, "num_lines": 9, "path": "/service.py", 
"repo_name": "objectc/smart_remote", "src_encoding": "UTF-8", "text": "import RPi.GPIO as GPIO\nimport time\nimport os\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(27, GPIO.IN)\nwhile True:\n if(GPIO.input(27)):\n os.system(\"irsend send_once Dyson POWER\")\ntime.sleep(1)\n" }, { "alpha_fraction": 0.7400346398353577, "alphanum_fraction": 0.762565016746521, "avg_line_length": 47.08333206176758, "blob_id": "76d08f41224ccbcf7158d7367bc19965f7a36b17", "content_id": "1d12bb2cb83463b1c5cb294c39b737db6d77821d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1154, "license_type": "permissive", "max_line_length": 123, "num_lines": 24, "path": "/README.md", "repo_name": "objectc/smart_remote", "src_encoding": "UTF-8", "text": "# smart_remote\nI lost my remote of Dyson AM09. So that I use Raspberry Pi, IR transmitter and Amazon Ehco to build a smart remote instead.\n\n[![Smart Remote](https://img.youtube.com/vi/TKRvqR-jYNs/0.jpg)](https://www.youtube.com/watch?v=TKRvqR-jYNs \"Smart Remote\")\n\n## Flow Chart\n![Flow Chart](https://github.com/objectc/smart_remote/blob/master/res/flowchart.png?raw=true)\n\n## [Flask-Ask](https://flask-ask.readthedocs.io/en/latest/index.html)\nAlexa Skills Kit Development for Amazon Echo Devices with Python\n\n## [ngrok](https://ngrok.com/product)\nngrok exposes local servers behind NATs and firewalls to the public internet over secure tunnels.\n\n## [LIRC](http://www.lirc.org/html/index.html \"LIRC\")\n\nLIRC is a package that allows you to decode and send infra-red signals of many (but not all) commonly used remote controls.\n[How to install and config LIRC](https://gist.github.com/prasanthj/c15a5298eb682bde34961c322c95378b#file-lirc-pi3-txt)\n## IR transmitter\n\n![IR transmitter](https://github.com/objectc/smart_remote/blob/master/res/IR_Remote.jpg?raw=true)\n## GPIO\n\n![GPIO](https://github.com/objectc/smart_remote/blob/master/res/GPIO.jpg?raw=true)\n" } ]
4
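All three scripts in the record above shell out with os.system('irsend send_once Dyson ' + command). The whitelist lookup keeps that safe, but the list form of subprocess skips the shell entirely and reports failures; a sketch of the same call written that way (it assumes irsend is on PATH, per the LIRC setup in the README):

```python
import subprocess

def send_ir(command):
    # No shell involved, so nothing can be injected into the command line,
    # and check=True raises CalledProcessError if irsend exits non-zero.
    subprocess.run(['irsend', 'send_once', 'Dyson', command], check=True)
```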
lumious/raspberry_wakeonlan
https://github.com/lumious/raspberry_wakeonlan
6ea200c0692e029b39257157feab51103a53ad67
4dd26421fc033f7e160115176bdd37a072c1fdfb
1efb5e724b6bb332f66c982998abbd083e521ad1
refs/heads/master
2021-01-01T16:23:21.912293
2017-07-25T08:03:34
2017-07-25T08:03:34
97,821,572
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.5207852125167847, "alphanum_fraction": 0.5461893677711487, "avg_line_length": 23.101449966430664, "blob_id": "a60f65fd276b9b4c357e7d3a490cd20168145e63", "content_id": "1fe59daa12abcc7ecb62a69eeec9171e862f8aac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1788, "license_type": "no_license", "max_line_length": 62, "num_lines": 69, "path": "/app.py", "repo_name": "lumious/raspberry_wakeonlan", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\r\nfrom flask import Flask,render_template,request,jsonify\r\nimport re\r\nimport socket\r\nimport struct\r\n\r\napp = Flask(__name__)\r\n\r\n@app.route('/')\r\ndef index():\r\n return render_template('index.html')\r\n\r\n@app.route('/wake', methods=['POST'])\r\ndef wake():\r\n response = {}\r\n\r\n try:\r\n mac = request.values.get('mac_addr')\r\n if mac == None:\r\n raise ValueError\r\n if not check_mac(mac):\r\n raise ValueError\r\n wake_on_lan(mac)\r\n response['code']=1\r\n response['message']='请求成功'\r\n\r\n except ValueError:\r\n response['code']=0\r\n response['message'] = '请输入正确的MAC地址'\r\n\r\n return jsonify(response)\r\n\r\n# 检查mac地址\r\ndef check_mac(mac_addr):\r\n # 长度检查\r\n if len(mac_addr) == 12:\r\n pass\r\n elif len(mac_addr) == 17:\r\n mac_addr = mac_addr.replace(':', '')\r\n else:\r\n return False\r\n # 正则检查\r\n pattern = re.compile(r'[0-9A-Fa-f]{12}')\r\n result = pattern.match(mac_addr)\r\n if result is not None:\r\n return True\r\n else:\r\n return False\r\n\r\ndef wake_on_lan(mac):\r\n if len(mac) == 12:\r\n pass\r\n elif len(mac) == 17:\r\n macaddress = mac.replace(':', '')\r\n else:\r\n raise ValueError('mac地址有误')\r\n data = 'FFFFFFFFFFFF' + mac * 16\r\n byte_data = b''\r\n for i in range(0, len(data), 2):\r\n byte_dat = struct.pack('B', int(data[i: i + 2], 16))\r\n byte_data = byte_data + byte_dat\r\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)\r\n sock.sendto(byte_data, ('255.255.255.255', 7))\r\n sock.close()\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run(debug=False,port=5000,host='0.0.0.0')\r\n" }, { "alpha_fraction": 0.8484848737716675, "alphanum_fraction": 0.8484848737716675, "avg_line_length": 15.5, "blob_id": "14f55e001e7e0ca41e8c42c81b315cde18541378", "content_id": "962ef0d05aaf1099b96b6b80c7b2659d9dfa3d6f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 47, "license_type": "no_license", "max_line_length": 21, "num_lines": 2, "path": "/README.md", "repo_name": "lumious/raspberry_wakeonlan", "src_encoding": "UTF-8", "text": "# raspberry_wakeonlan\n树莓派WOL唤醒主机\n" } ]
2
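app.py above assembles the Wake-on-LAN magic packet byte by byte with struct.pack. On Python 3 the same packet, 6 bytes of 0xFF followed by the MAC repeated 16 times, can be built in one call with bytes.fromhex; a compact sketch:

```python
import socket

def wake(mac):
    # Normalize 'aa:bb:cc:dd:ee:ff' (or dashed) to bare hex.
    mac = mac.replace(':', '').replace('-', '')
    if len(mac) != 12:
        raise ValueError('invalid MAC address')
    # Magic packet: 6 x 0xFF, then the MAC 16 times.
    packet = bytes.fromhex('FF' * 6 + mac * 16)
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
    sock.sendto(packet, ('255.255.255.255', 7))  # port 7, as in app.py
    sock.close()
```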
Jaturavitv10/Drgitgud
https://github.com/Jaturavitv10/Drgitgud
2921b6160bdba9f83403be16b8d33387cbb951ca
9d2a3fdf871cffb4bfbb6272fa6c26ed47a3698b
f4d9d88e3f7ac3ff94f1be062d9398a1e0039dc3
refs/heads/master
2021-01-21T20:30:29.711230
2017-06-28T09:25:03
2017-06-28T09:25:03
92,246,063
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.43002545833587646, "alphanum_fraction": 0.4615775942802429, "avg_line_length": 35.38888931274414, "blob_id": "1ef65d92e105f49b8b09090dd56f371a425c9303", "content_id": "0028f1679bca3430cdd58b917a1f88c93398d0ce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1965, "license_type": "no_license", "max_line_length": 150, "num_lines": 54, "path": "/GFP Project/Script.py", "repo_name": "Jaturavitv10/Drgitgud", "src_encoding": "UTF-8", "text": "import dash\nimport pandas as pd\nfrom dash.dependencies import Input, Output\nimport dash_core_components as dcc\nimport dash_html_components as html\n\ndf = pd.read_csv('FPD-non-redundant-processed.csv')\napp = dash.Dash()\n\n#-----------------------------------------------------------------------------\n#-----------------------------------------------------------------------------\n\n#This is the part where you create the graph\n\n\n\n#-----------------------------------------------------------------------------\n#----------------------------------------------------------------------------\n\n# This is the part where body goes in\n\napp.layout = html.Div([\n html.Div([html.H2('FPD : A Database of Fluorescent Proteins',\n style={\n 'position': 'relative',\n 'top': '0px',\n 'left': '10px',\n 'font-family': 'Dosis',\n 'display': 'inline',\n 'font-size': '6.0rem',\n 'color': '#4D637F'\n }),\n ]),\n html.Div([\n html.P('Center of Data Mining and Biomedical Informatics, Faculty of Medical Technology, Mahidol University'),\n ], style={'margin-left': '10px','font-size' : '1.5rem'}),\n dcc.Slider(\n min=,\n max=9,\n marks={i: 'Label {}'.format(i) for i in range(10)},\n value=5,)\n ])\n\nexternal_css = [\"https://cdnjs.cloudflare.com/ajax/libs/skeleton/2.0.4/skeleton.min.css\",\n \"//fonts.googleapis.com/css?family=Raleway:400,300,600\",\n \"//fonts.googleapis.com/css?family=Dosis:Medium\",\n \"https://cdn.rawgit.com/plotly/dash-app-stylesheets/0e463810ed36927caf20372b6411690692f94819/dash-drug-discovery-demo-stylesheet.css\"]\n\n\nfor css in external_css:\n app.css.append_css({\"external_url\": css})\n \nif __name__ == '__main__':\n app.run_server(debug=True)\n" } ]
1
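Script.py above imports Input and Output from dash.dependencies but never registers a callback, so the slider is inert (and its `min=` argument was left blank, fixed above to 0). A minimal sketch of wiring the slider up in the same era's Dash API; the component ids are hypothetical, since the original layout assigns none:

```python
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output

app = dash.Dash()
app.layout = html.Div([
    dcc.Slider(id='level-slider', min=0, max=9, value=5,
               marks={i: 'Label {}'.format(i) for i in range(10)}),
    html.Div(id='slider-output'),
])

@app.callback(Output('slider-output', 'children'),
              [Input('level-slider', 'value')])
def show_value(value):
    # Re-runs every time the slider moves.
    return 'Selected level: {}'.format(value)

if __name__ == '__main__':
    app.run_server(debug=True)
```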
CalebWinston/patron
https://github.com/CalebWinston/patron
81f91aceb46d70ae6dbbcc140f266f6ccc6c69e3
58f9e3e70e00197e2726b14ef71cdd438cb555f5
1098791daa65d710a465c7a079daf494c563c9f6
refs/heads/master
2020-04-14T11:04:42.125642
2019-01-02T05:35:26
2019-01-02T05:35:26
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7284359931945801, "alphanum_fraction": 0.728909969329834, "avg_line_length": 26.402597427368164, "blob_id": "5bde2c17a9bf852d54dffdb0d49d4c086508a671", "content_id": "3da54ae900610c8617b330e60d96e7a2146030e5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2110, "license_type": "permissive", "max_line_length": 65, "num_lines": 77, "path": "/app/__init__.py", "repo_name": "CalebWinston/patron", "src_encoding": "UTF-8", "text": "from config import Config\nfrom copy import deepcopy\nfrom flask import Flask\nfrom flask_admin import Admin\nfrom flask_apscheduler import APScheduler\nfrom flask_blogging_patron import BloggingEngine, SQLAStorage\nfrom flask_bootstrap import Bootstrap\nfrom flask_login import LoginManager\nfrom flask_mail import Mail\nfrom flask_migrate import Migrate\nfrom flask_principal import Permission, RoleNeed\nfrom flask_sqlalchemy import SQLAlchemy\n\n\n# extensions\nbootstrap = Bootstrap()\ndb = SQLAlchemy()\nmigrate = Migrate()\nglobal sql_storage\nblog_engine = BloggingEngine()\nlogin = LoginManager()\nlogin.login_view = 'auth.login'\nlogin.login_message_category = 'info'\nmail = Mail()\nscheduler = APScheduler()\nadmin = Admin(name='LibrePatron', template_mode='bootstrap3')\n\n# global\nglobal temp_bp\n\n# permissions - flask_principal objects created by BloggingEngine\nprincipals = blog_engine.principal\nadmin_permission = Permission(RoleNeed('admin'))\n\n\ndef create_app(config_class=Config):\n app = Flask(__name__)\n app.config.from_object(config_class)\n bootstrap.init_app(app)\n db.init_app(app)\n with app.app_context():\n global sql_storage\n sql_storage = SQLAStorage(db=db)\n migrate.init_app(app, db)\n login.init_app(app)\n mail.init_app(app)\n admin.init_app(app)\n blog_engine.init_app(app, sql_storage)\n scheduler.init_app(app)\n scheduler.start()\n\n # deepcopy auto-generated flask_blogging bp, then delete it\n global temp_bp\n temp_bp = deepcopy(app.blueprints['blogging'])\n del app.blueprints['blogging']\n\n # blueprints\n from app.api import bp as api_bp\n from app.auth import bp as auth_bp\n from app.blogging import bp as blogging_bp\n from app.main import bp as main_bp\n app.register_blueprint(auth_bp, url_prefix='/auth')\n app.register_blueprint(api_bp, url_prefix='/api')\n app.register_blueprint(\n blogging_bp,\n url_prefix=app.config.get('BLOGGING_URL_PREFIX')\n )\n app.register_blueprint(main_bp)\n\n # tasks\n from app import tasks\n\n return app\n\n\nfrom app import admin_views\nfrom app import models, subscriptions\n" }, { "alpha_fraction": 0.7719298005104065, "alphanum_fraction": 0.7719298005104065, "avg_line_length": 13.25, "blob_id": "a3c606e6d52c9557eda40f93c69462b743d69793", "content_id": "53e63b12a206ee930e82dc6821ef4699e437374f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 57, "license_type": "permissive", "max_line_length": 24, "num_lines": 4, "path": "/boot.sh", "repo_name": "CalebWinston/patron", "src_encoding": "UTF-8", "text": "#!/bin/sh\nflask db upgrade\nwait\nexec gunicorn patron:app\n" }, { "alpha_fraction": 0.6063829660415649, "alphanum_fraction": 0.6124619841575623, "avg_line_length": 46, "blob_id": "13ae5a429fd7bfb51037e140f3ad8a9d081f78fd", "content_id": "00796417c7b496da784c5aebdee7966f8c4bfa4c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 658, "license_type": "permissive", "max_line_length": 137, 
"num_lines": 14, "path": "/app/templates/auth/account.html", "repo_name": "CalebWinston/patron", "src_encoding": "UTF-8", "text": "{% extends \"blogging/base.html\" %}\n\n{% block main %}\n <h1>Account Information</h1>\n <p>Plan Name: {{ current_user.role }}</p>\n <p>Expiration: {{ expires }}</p>\n <p>Note that if you made a payment today, your full 30 day extension will not be reflected until the blockchain confirms payment.</p>\n <a href=\"{{ url_for('main.support') }}\" class=\"btn btn-default\">Make a Payment or Change Plans</a>\n {% if opt_out %}\n <a href=\"{{ url_for('auth.mail_opt') }}\" class=\"btn btn-default\">Opt In to Emails</a>\n {% else %}\n <a href=\"{{ url_for('auth.mail_opt') }}\" class=\"btn btn-default\">Opt Out of Emails</a>\n {% endif %}\n{% endblock %}\n" }, { "alpha_fraction": 0.7805745601654053, "alphanum_fraction": 0.7879617214202881, "avg_line_length": 48.39189147949219, "blob_id": "c08feeedcf4552db810ce7a13dc524af20000516", "content_id": "a83667090162dcd300f26cb05296876ccaafb3d5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3655, "license_type": "permissive", "max_line_length": 298, "num_lines": 74, "path": "/README.md", "repo_name": "CalebWinston/patron", "src_encoding": "UTF-8", "text": "<h1>LibrePatron: A Self-Hosted Patreon Alternative Backed by BTCPay</h1>\n\nCopyright (C) 2018 Jeff Vandrew Jr\nLatest Stable Release: 0.1.27\n\nPatreon is a popular service that allows content creators to receive contributions from supporters on a recurring basis. Unfortunately, Patreon is also a dedicated enemy of the concept of free speech as an important civic virtue. Patreon is known to arbitarily ban its creators for \"thought crime.\"\n\nUnfortunately most Patreon alternatives to date do not implement all of Patreon's main features, namely:\n\n* Main page to entice new subscribers\n* Google Analytics\n* Protected page to post updates (viewable by subscribers only)\n* Automatic bulk emailing of updates to subscribers\n* Managing billing and subscription expiration\n* Automatic monthly billing via email\n\nWhile still in alpha status, LibrePatron implements all of these features. \n\nPortions of this package rely on a fork of the Flask-Blogging package by Gouthaman Balaraman.\n\nIf you're a creator reading this unconcerned with free speech issues, Patreon still takes a percentage of your earnings, which can be avoided by using LibrePatron.\n\n<h2>Improvements Roadmap</h2>\n\n1. Fiat integration. We're not in a 100% Bitcoin world yet (unfortunately). \n2. Subscriber export.\n3. Allowing subsribers to comment on updates is turned off by default, but is permitted by entering Disqus information in the configuration file. 
If turning this feature on is popular, factoring out Disqus could be a long term improvement.\n\n<h2>Installation</h2>\n\nA docker-compose is provided that automatically installs LibrePatron along with nginx and obtains SSL certificates, all in a few easy steps (to be executed from `$HOME` directory):\n\n```bash\nwget https://raw.githubusercontent.com/JeffVandrewJr/patron/master/librepatron.env\n\n# open librepatron.env and fill in the necessary info as mentioned in the file comments, and then save\nnano librepatron.env\n\nmkdir pricing\ncd pricing\nwget -O pricing.yaml https://raw.githubusercontent.com/JeffVandrewJr/patron/master/pricing.yaml.sample\n\n# open pricing.yaml, enter your subscription plans, and then save it\nnano pricing.yaml\n\ncd ..\nsudo docker network create nginx-net\nwget https://raw.githubusercontent.com/JeffVandrewJr/patron/master/docker-compose.yml\nsudo docker-compose up -d\n```\nYour site will then be launched and operational!\n\nUsers get a one day subscription as soon as they pay the BTCPay invoice. That is bumped to 30 days as soon as BTCPay recognizes the payment as \"confirmed\". BTCPay settings determine how many confirmations are required to make a payment \"confirmed.\"\n\nThe first visitor to the site will be prompted to register as administrator. The administrator is the user that posts updates, gets paid, etc. The administrator is the content creator.\n\nAfter registering as administrator, be sure to first make a \"homepage\" post. A \"homepage\" post does not appear on your updates, but sets the text for the main page that all visitors (subscribed or not) can view. Then you can add some updates.\n\nBefore letting people know about your site, be sure to click \"Account\" to pair your site to your BTCPay server!\n\n<h3>Notes</h3>\n\nYou'll note that during setup, you must provide a \"secret code\" of random digits. This is necessary for a variety of reasons. If you don't know how to get one, here is one method:\n\n```python\npython3\n>>>import os\n>>>os.urandom(24).hex()\n>>>exit()\n```\n\nA random string will be printed to screen that you can then copy and paste.\n\nYou'll also need SMTP server info. Gmail is not a good server to use for this purpose. 
If you need one, here's an example of a service that would work: https://www.easy-smtp.com/\n" }, { "alpha_fraction": 0.533695638179779, "alphanum_fraction": 0.551086962223053, "avg_line_length": 41.79069900512695, "blob_id": "5b35ca9b2dd17eb1f116452e47bcfb070f75334b", "content_id": "ee8aed3a3869ceccc44f9ccd764f95b8ede837b6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1840, "license_type": "permissive", "max_line_length": 75, "num_lines": 43, "path": "/app/api/routes.py", "repo_name": "CalebWinston/patron", "src_encoding": "UTF-8", "text": "from app import db\nfrom app.api import bp\nfrom app.models import BTCPayClientStore, User\nfrom datetime import datetime, timedelta\nfrom flask import request, abort\n\n\n@bp.route('/v1/updatesub', methods=['GET', 'POST'])\ndef update_sub():\n # receives and processes pmt notifications from BTCPay\n if not request.json or 'id' not in request.json:\n return \"Not a valid IPN.\", 200\n btc_client_store = BTCPayClientStore.query.all()[0]\n btc_client = btc_client_store.client\n invoice = btc_client.get_invoice(request.json['id'])\n if isinstance(invoice, dict):\n if 'status' in invoice:\n if invoice['status'] == \"paid\" or \\\n invoice['status'] == \"confirmed\":\n user = User.query.filter_by(\n username=invoice['buyer']['name']).first()\n if user is None:\n return \"Payment made for unregistered user.\", 200\n if user.role == 'admin':\n return \"Administrator should not make payments.\", 200\n elif invoice['status'] == \"confirmed\":\n user.expiration = datetime.today() + timedelta(days=30)\n user.role = invoice['orderId']\n db.session.commit()\n return \"Payment Accepted\", 201\n elif invoice['status'] == \"paid\":\n user.expiration = datetime.today() + timedelta(days=1)\n user.role = invoice['orderId']\n db.session.commit()\n return \"Payment Accepted\", 201\n else:\n return \"IPN Received\", 200\n else:\n return \"Status not paid or confirmed.\", 200\n else:\n return \"No payment status received.\", 200\n else:\n return \"Invalid transaction ID.\", 400\n" }, { "alpha_fraction": 0.6880919933319092, "alphanum_fraction": 0.6887686252593994, "avg_line_length": 42.47058868408203, "blob_id": "72bd2804c45df40303c1d01817c1036cc1394fd1", "content_id": "8204119dee6016448531767762baac1f65c62b06", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1478, "license_type": "permissive", "max_line_length": 73, "num_lines": 34, "path": "/config.py", "repo_name": "CalebWinston/patron", "src_encoding": "UTF-8", "text": "import os\nfrom os.path import normpath, abspath, join\n\nbasedir = abspath(os.path.dirname(__file__))\n\n\nclass Config(object):\n ADMIN = os.environ.get('EMAIL')\n BLOGGING_SITENAME = os.environ.get('SITENAME') or 'LibrePatron'\n BLOGGING_SITEURL = os.environ.get('SITEURL') or 'https://example.com'\n BLOGGING_URL_PREFIX = '/updates'\n BLOGGING_BRANDURL = os.environ.get('BRANDURL')\n BLOGGING_TWITTER_USERNAME = os.environ.get('TWITTER')\n BLOGGING_DISQUS_SITENAME = os.environ.get('DISQUS')\n BLOGGING_GOOGLE_ANALYTICS = os.environ.get('GOOGLE_ANALYTICS')\n BLOGGING_PERMISSIONS = True\n BLOGGING_PERMISSIONNAME = 'admin'\n BLOGGING_PLUGINS = None\n BLOGGING_ALLOW_FILE_UPLOAD = True\n BLOGGING_ESCAPE_MARKDOWN = False\n MAIL_SERVER = os.environ.get('MAIL_SERVER')\n if os.environ.get('MAIL_PORT') is not None:\n MAIL_PORT = int(os.environ.get('MAIL_PORT'))\n MAIL_USE_TLS = os.environ.get('MAIL_USE_TLS') is not None\n MAIL_USERNAME 
= os.environ.get('MAIL_USERNAME')\n MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')\n PREFERRED_URL_SCHEME = 'https'\n SCHEDULER_HOUR = os.environ.get('SCHEDULER_HOUR') or 9\n SCHEDULER_MINUTE= os.environ.get('SCHEDULER_MINUTE')\n SECRET_KEY = os.environ.get('SECRET_KEY') or 'placeholder'\n SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \\\n 'sqlite:///' + join(basedir, 'app.db')\n SQLALCHEMY_TRACK_MODIFICATIONS = False\n SERVER_NAME = os.environ.get('VIRTUAL_HOST')\n" }, { "alpha_fraction": 0.5894821286201477, "alphanum_fraction": 0.5908237099647522, "avg_line_length": 32.27678680419922, "blob_id": "3a13b6a546117736b96dc1940b239d864c46f47d", "content_id": "e463f85c7116cdad7ed08e211903ccc0c086529f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3727, "license_type": "permissive", "max_line_length": 86, "num_lines": 112, "path": "/app/main/routes.py", "repo_name": "CalebWinston/patron", "src_encoding": "UTF-8", "text": "from app import blog_engine\nfrom app.main import bp\nfrom app.models import BTCPayClientStore\nfrom app.pricing import Pricing\nfrom flask import redirect, url_for, flash, render_template, request\nfrom flask_blogging_patron import PostProcessor\nfrom flask_blogging_patron.views import page_by_id_fetched,\\\n page_by_id_processed\nfrom flask_login import current_user, login_required\nimport traceback\n\n\n@bp.route('/')\n@bp.route('/index')\ndef index():\n try:\n posts = blog_engine.storage.get_posts(\n count=1,\n recent=True,\n tag='public'\n )\n temp_post = posts[0]\n except Exception as e:\n traceback.print_tb(e.__traceback__)\n flash('This site has no homepage yet. \\\n Please create one.', 'warning')\n if current_user.is_authenticated and current_user.role == 'admin':\n return redirect(url_for('blogging.editor'))\n else:\n return redirect(url_for('auth.register'))\n config = blog_engine.config\n post = blog_engine.storage.get_post_by_id(temp_post['post_id'])\n meta = {}\n meta['is_user_blogger'] = False\n if current_user.is_authenticated:\n if hasattr(current_user, 'role'):\n if current_user.role == 'admin':\n meta['is_user_blogger'] = True\n meta['post_id'] = temp_post['post_id']\n meta['slug'] = PostProcessor.create_slug(temp_post['title'])\n page_by_id_fetched.send(\n blog_engine.app,\n engine=blog_engine,\n post=post,\n meta=meta\n )\n blog_engine.process_post(post, render=True)\n page_by_id_processed.send(\n blog_engine.app,\n engine=blog_engine,\n post=post,\n meta=meta\n )\n return render_template(\n 'main/homepage.html',\n post=post,\n config=config,\n meta=meta\n )\n\n\n@bp.route('/support')\ndef support():\n price_levels = Pricing().price_levels\n return render_template('main/support.html',\n levels=price_levels)\n\n\n@bp.route('/createinvoice')\n@login_required\ndef create_invoice():\n price_plans = Pricing().price_plans\n user_arg = request.args.get('username')\n if user_arg is not None:\n if user_arg != current_user.username:\n flash('You are logged in as a different user!\\\n Please log out first.', 'warning')\n return redirect(url_for('main.index'))\n else:\n current_plan = current_user.role\n if current_plan is not None:\n price = price_plans.get(current_plan)\n if price is None:\n return redirect(url_for('main.support'))\n plan = current_plan\n else:\n return redirect(url_for('main.support'))\n else:\n string_price = request.args.get('price')\n if string_price is None:\n return redirect(url_for('main.support'))\n plan = request.args.get('name')\n price = int(string_price)\n 
if price_plans.get(plan) != price:\n return redirect(url_for('main.support'))\n btc_client = BTCPayClientStore.query.first().client\n if btc_client is None:\n return 'BTCPay has not been paired!', 501\n inv_data = btc_client.create_invoice({\n \"price\": price,\n \"currency\": \"USD\",\n \"buyer\": {\n \"name\": current_user.username,\n \"email\": current_user.email,\n },\n \"orderId\": plan,\n \"extendedNotifications\": True,\n \"fullNotifications\": True,\n \"notificationURL\": url_for('api.update_sub', _external=True, _scheme='https'),\n \"redirectURL\": url_for('main.index', _external=True, _scheme='https')\n })\n return redirect(inv_data['url'])\n" }, { "alpha_fraction": 0.6606170535087585, "alphanum_fraction": 0.6696914434432983, "avg_line_length": 32.39393997192383, "blob_id": "614f4a11aee34d5fb532734f2cfb6c883ce0f7cb", "content_id": "354ce5abc6b8a8edda5e7f5be861ef465649c36c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1102, "license_type": "permissive", "max_line_length": 67, "num_lines": 33, "path": "/app/tasks.py", "repo_name": "CalebWinston/patron", "src_encoding": "UTF-8", "text": "from app import scheduler, db\nfrom app.email import send_reminder_emails\nfrom app.models import User\nfrom datetime import datetime, timedelta\n\nhour = scheduler.app.config.get('SCHEDULER_HOUR')\nminute = scheduler.app.config.get('SCHEDULER_MINUTE')\nif hour is not None:\n hour = int(hour)\nelse:\n hour = 9\nif minute is not None:\n minute = int(minute)\n\n\n@scheduler.task('cron', id='do_renewals', hour=hour, minute=minute)\ndef renewals():\n yesterday = datetime.today() - timedelta(hours=24)\n tomorrow = datetime.today() + timedelta(hours=24)\n with scheduler.app.app_context():\n last_reminder = User.query.filter(\n User.expiration < tomorrow,\n User.expiration > yesterday\n ).all()\n six = datetime.today() + timedelta(hours=144)\n four = datetime.today() + timedelta(hours=96)\n with scheduler.app.app_context():\n first_reminder = User.query.filter(\n User.expiration < six,\n User.expiration > four\n ).all()\n reminder_list = first_reminder + last_reminder\n send_reminder_emails(scheduler.app, reminder_list)\n" }, { "alpha_fraction": 0.5785235166549683, "alphanum_fraction": 0.5785235166549683, "avg_line_length": 28.799999237060547, "blob_id": "7198a69b98552eae6b2f087a38466b39744b32df", "content_id": "c156e4d97098322f6bfca6af2f08707812d4f1a6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 745, "license_type": "permissive", "max_line_length": 67, "num_lines": 25, "path": "/app/pricing.py", "repo_name": "CalebWinston/patron", "src_encoding": "UTF-8", "text": "from pathlib import Path\nfrom ruamel.yaml import YAML\n\n\nclass Pricing(object):\n def __init__(self):\n self.price_levels = self._get_price_levels()\n self.price_plans = self._levels_to_plans(self.price_levels)\n\n def _get_price_levels(self):\n yaml = YAML(typ='safe')\n file = Path('/var/pricing/pricing.yaml')\n if file.is_file():\n pricing = file\n else:\n pricing = Path('pricing.yaml.sample')\n with open(pricing) as f:\n levels = yaml.load(f)\n return levels\n\n def _levels_to_plans(self, price_levels):\n price_plans = {}\n for value in price_levels.values():\n price_plans[value['name']] = value['price']\n return price_plans\n" }, { "alpha_fraction": 0.6873614192008972, "alphanum_fraction": 0.6873614192008972, "avg_line_length": 30.465116500854492, "blob_id": "ca2fb738ce3620b7990b87f89945aa413ffcb6f6", 
"content_id": "a31d357da3794c3774d1a1680a126774db2b1f69", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1353, "license_type": "permissive", "max_line_length": 66, "num_lines": 43, "path": "/app/admin_views/__init__.py", "repo_name": "CalebWinston/patron", "src_encoding": "UTF-8", "text": "from app import admin, db\nfrom app.admin_views.forms import BTCCodeForm\nfrom app.models import User\nfrom app.utils import pairing\nfrom flask_admin import BaseView, expose\nfrom flask_admin.contrib.sqla import ModelView\nfrom flask import flash, redirect, url_for\nfrom flask_login import current_user\n\n\nclass LibrePatronBaseView(BaseView):\n def is_accessible(self):\n return current_user.is_authenticated and \\\n current_user.role == 'admin'\n\n def inaccessible_callback(self, name, **kwargs):\n return redirect(url_for('auth.login'))\n\n\nclass BTCPayView(LibrePatronBaseView):\n @expose('/', methods=['GET', 'POST'])\n def btcpay(self):\n form = BTCCodeForm()\n if form.validate_on_submit():\n pairing(code=form.code.data, host=form.host.data)\n flash('Pairing to BTCPay is complete.')\n return redirect(url_for('admin.index'))\n return self.render('admin/btcpay.html', form=form)\n\n\nadmin.add_view(BTCPayView(name='BTCPay Setup', endpoint='btcpay'))\n\n\nclass LibrePatronModelView(ModelView):\n def is_accessible(self):\n return current_user.is_authenticated and \\\n current_user.role == 'admin'\n\n def inaccessible_callback(self, name, **kwargs):\n return redirect(url_for('auth.login'))\n\n\nadmin.add_view(LibrePatronModelView(User, db.session))\n" }, { "alpha_fraction": 0.7139107584953308, "alphanum_fraction": 0.7139107584953308, "avg_line_length": 30.75, "blob_id": "d9371894040e5c16401b4b89512fb47e7c8d708b", "content_id": "0c435b0ac78f67a7c0d79e0b27074cf5e76ce201", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 381, "license_type": "permissive", "max_line_length": 67, "num_lines": 12, "path": "/app/admin_views/forms.py", "repo_name": "CalebWinston/patron", "src_encoding": "UTF-8", "text": "from flask_wtf import FlaskForm\nfrom wtforms import StringField, SubmitField\nfrom wtforms.validators import DataRequired\n\n\nclass BTCCodeForm(FlaskForm):\n host = StringField(\n 'URL of BTCPay Instance (include the \"https://\")',\n validators=[DataRequired()]\n )\n code = StringField('Pairing Code', validators=[DataRequired()])\n submit = SubmitField('Submit')\n" }, { "alpha_fraction": 0.5053635239601135, "alphanum_fraction": 0.7020262479782104, "avg_line_length": 16.47916603088379, "blob_id": "a9470cb5a01f5db8916e97de1f50a9fe8c0911e5", "content_id": "d7e8add74c57c740526adaccebf92a3474e5d2e0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 839, "license_type": "permissive", "max_line_length": 25, "num_lines": 48, "path": "/requirements.txt", "repo_name": "CalebWinston/patron", "src_encoding": "UTF-8", "text": 
"alembic==1.0.5\nAPScheduler==3.5.3\nasn1crypto==0.24.0\nblinker==1.4\nbtcpay==1.0.3\ncertifi==2018.11.29\ncffi==1.11.5\nchardet==3.0.4\nClick==7.0\ncryptography==2.4.2\ndominate==2.3.5\necdsa==0.13\nFlask==1.0.2\nFlask-APScheduler==1.11.0\nFlask-Admin==1.5.3\nFlask-Bootstrap==3.3.7.1\nFlask-Caching==1.4.0\nFlask-FileUpload==0.5.0\nFlask-Login==0.4.1\nFlask-Mail==0.9.1\nFlask-Migrate==2.3.1\nFlask-Principal==0.4.0\nFlask-SQLAlchemy==2.3.2\nFlask-WTF==0.14.2\nidna==2.8\nitsdangerous==1.1.0\nJinja2==2.10\nMako==1.0.7\nMarkdown==3.0.1\nMarkupSafe==1.1.0\npycparser==2.19\nPyJWT==1.7.1\npython-dateutil==2.7.5\npython-dotenv==0.10.1\npython-editor==1.0.3\npython-slugify==2.0.0\npytz==2018.7\nrequests==2.21.0\nruamel.yaml==0.15.82\nshortuuid==0.5.0\nsix==1.12.0\nSQLAlchemy==1.2.15\ntzlocal==1.5.1\nUnidecode==1.0.23\nurllib3==1.24.1\nvisitor==0.1.3\nWerkzeug==0.14.1\nWTForms==2.2.1\n" }, { "alpha_fraction": 0.6600984930992126, "alphanum_fraction": 0.6699507236480713, "avg_line_length": 32.83333206176758, "blob_id": "7c4e7608a2cf0dd00427f68c99c31c9b1150b149", "content_id": "c96762882ba2c7d917042ae915442bd2dff564ee", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 812, "license_type": "permissive", "max_line_length": 64, "num_lines": 24, "path": "/patron.py", "repo_name": "CalebWinston/patron", "src_encoding": "UTF-8", "text": "# Copyright (c) 2018 Jeff Vandrew Jr\n\nfrom app import create_app, db, blog_engine\nfrom app.email import send_reminder_emails\nfrom app.models import User, BTCPayClientStore, SecretKey\nfrom app.pricing import Pricing\nfrom datetime import datetime, date, timedelta\nfrom flask_blogging_patron.signals import editor_post_saved\nimport os\n\napp = create_app()\n\n\n@app.shell_context_processor\ndef make_shell_context():\n return {'db': db, 'User': User,\n 'editor_post_saved': editor_post_saved,\n 'blog_engine': blog_engine,\n 'BTCPayClientStore': BTCPayClientStore,\n 'Pricing': Pricing,\n 'send_reminder_emails': send_reminder_emails,\n 'tomorrow': datetime.today() + timedelta(hours=24),\n 'yesterday': datetime.today() - timedelta(hours=24),\n }\n" } ]
13
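The Pricing class in app/pricing.py of the CalebWinston/patron row above maps each pricing level's 'name' to its 'price'. A minimal sketch of that transformation, assuming a hypothetical pricing.yaml (the level names and amounts are illustrative, not taken from the repo):

    # Assumed shape of /var/pricing/pricing.yaml (hypothetical values):
    #
    #   level1:
    #     name: supporter
    #     price: 5
    #   level2:
    #     name: patron
    #     price: 20
    #
    # which ruamel.yaml would load as the dict below.
    levels = {
        'level1': {'name': 'supporter', 'price': 5},
        'level2': {'name': 'patron', 'price': 20},
    }

    # Same transformation as Pricing._levels_to_plans:
    plans = {v['name']: v['price'] for v in levels.values()}
    assert plans == {'supporter': 5, 'patron': 20}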
embray/tracext-redispub
https://github.com/embray/tracext-redispub
428ac7c14842574e088261276605bc39762f118c
ebc4c2dd787468b8dafda683b244498246d67932
cfabd89e9978e4d48582542cacddd0cb7dc21386
refs/heads/master
2021-01-25T13:35:10.418209
2018-03-09T16:07:36
2018-03-09T16:07:36
123,585,937
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.5639160871505737, "alphanum_fraction": 0.570629358291626, "avg_line_length": 36.23958206176758, "blob_id": "36d36f78678f844e503e04c1085cf9c67af498c1", "content_id": "a794ba5ff56c1ca74de2e2865e015481aedffa3d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3575, "license_type": "no_license", "max_line_length": 82, "num_lines": 96, "path": "/tracext/redispub/wiki.py", "repo_name": "embray/tracext-redispub", "src_encoding": "UTF-8", "text": "\"\"\"Wiki event handling.\"\"\"\n\nfrom __future__ import absolute_import\n\n\nfrom trac.core import implements\nfrom trac.wiki.api import IWikiChangeListener\n\nfrom .redis import RedisComponent\nfrom .util import dumps\n\n\nclass RedisWikiStream(RedisComponent):\n \"\"\"\n Listens for wiki creation/change/deletion events and and publishes\n them to the relevant channels:\n\n * Wiki page creation events are published to the\n ``<prefix>.<env>.wiki.created`` channel, where ``<prefix>`` is the\n configurable channel name prefix (e.g. 'trac'), and ``<env>`` is the\n environment name.\n\n Messages on this channel are JSON-encoded dictionaries representing\n the wiki page (``\"version\"`` is always ``1`` for new pages)::\n\n {\n \"name\": \"PageName\",\n \"version\": 1,\n \"time\": \"2018-03-02T12:31:28.184283\",\n \"author\": \"somebody\",\n \"text\": \"...full page text...\",\n \"comment\": \"edit comment, if any\",\n \"readonly\": 0\n }\n\n * Wiki change events are published to the\n ``<prefix>.<env>.wiki.changed.<name>`` channel, where ``<prefix>``\n and ``<env>`` are as before, and ``<name>`` is the wiki page name. This\n allows subscribing just to the changes on a specific wiki page, if\n desired. The message is a JSON-encoded dictionary with the same\n format as the wiki created event, along with an additional\n ``\"old_text\"`` property containing the previous text of the wiki page\n before the change::\n\n {\n \"name\": \"PageName\",\n \"version\": 2,\n ...\n \"text\": \"the new text of the page\",\n \"old_text\": \"the old text of the page\",\n ...\n }\n\n * Wiki deletion events are published to the\n ``<prefix>.<env>.wiki.deleted`` channel. These events have the same\n format as wiki creation events, and include the properties of the\n just-deleted wiki page..\n \"\"\"\n\n implements(IWikiChangeListener)\n\n _realm = 'wiki'\n\n # ITicketChangeListener methods\n def wiki_page_added(self, page):\n self.redis.publish(self._channel_name('created'),\n self._page_to_json(page))\n\n def wiki_page_changed(self, page, version, t, comment, author, ipnr=None):\n # note: the version, timestamp (t), comment, and author arguments\n # are all passed to this method for backwards-compatibility, but the\n # same data is already available in the page object\n self.redis.publish(self._channel_name('changed', page.name),\n self._page_to_json(page, old_text=True))\n\n def wiki_page_deleted(self, ticket):\n self.redis.publish(self._channel_name('deleted'),\n self._page_to_json(page))\n\n def _channel_name(self, method, page_name=None):\n channel = super(RedisWikiStream, self)._channel_name(method)\n if page_name is not None:\n channel += '.' 
+ page_name\n return channel\n\n def _page_to_json(self, page, old_text=False):\n \"\"\"\n Convert a `trac.wiki.model.WikiPage` object to a JSONified dictionary.\n \"\"\"\n attrs = ['name', 'version', 'time', 'author', 'text', 'comment',\n 'readonly']\n\n if old_text:\n attrs.append('old_text')\n\n return dumps(dict((attr, getattr(page, attr)) for attr in attrs))\n" }, { "alpha_fraction": 0.6761487722396851, "alphanum_fraction": 0.6761487722396851, "avg_line_length": 24.38888931274414, "blob_id": "0f4d5382ebb595c2945d19b0c6d72087638d735d", "content_id": "57b23333537b1b069336510d07f83059c52d1e05", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 457, "license_type": "no_license", "max_line_length": 56, "num_lines": 18, "path": "/tracext/redispub/util.py", "repo_name": "embray/tracext-redispub", "src_encoding": "UTF-8", "text": "import datetime\nimport functools\nimport json\n\n\nclass JSONEncoder(json.JSONEncoder):\n REGISTERED_TYPES = {\n datetime.datetime: lambda o: o.isoformat(),\n datetime.date: lambda o: o.isoformat()\n }\n def default(self, obj):\n if type(obj) in self.REGISTERED_TYPES:\n return self.REGISTERED_TYPES[type(obj)](obj)\n\n return super(JSONEncoder, self).default(obj)\n\n\ndumps = functools.partial(json.dumps, cls=JSONEncoder)\n" }, { "alpha_fraction": 0.654566764831543, "alphanum_fraction": 0.6580796241760254, "avg_line_length": 30.592592239379883, "blob_id": "503a11f70d4c1b1783627687d6bbc45373d0dfa3", "content_id": "ca65e5a32540c9460f08c233d1572400a5e9f59c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 854, "license_type": "no_license", "max_line_length": 76, "num_lines": 27, "path": "/setup.py", "repo_name": "embray/tracext-redispub", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nfrom setuptools import setup\n\nsetup(\n name='tracext-redispub',\n version='0.2.dev0',\n author='Erik M. Bray',\n author_email='erik.m.bray@gmail.com',\n url='https://github.com/embray/tracext-redispub',\n description='Redis pub/sub channels for Trac events',\n download_url='https://pypi.python.org/pypi/tracext-redispub',\n packages=['tracext', 'tracext.redispub'],\n platforms='all',\n license='BSD',\n # Loose versions for now since this will work with most versions of Trac\n # and most current versions of redis-py\n install_requires=[\n 'trac',\n 'redis'\n ],\n entry_points={'trac.plugins': [\n 'redispub.client = tracext.redispub.redis:RedisClient',\n 'redispub.ticket = tracext.redispub.ticket:RedisTicketStream',\n 'redispub.wiki = tracext.redispub.wiki:RedisWikiStream'\n ]}\n)\n\n" }, { "alpha_fraction": 0.6185101866722107, "alphanum_fraction": 0.6441375613212585, "avg_line_length": 39.702701568603516, "blob_id": "4591cb64f083fbd1f617d32eb0e47541a905624a", "content_id": "3365e2c2a13ee9a26fc0eef750fdd6bbd68f4e77", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 7531, "license_type": "no_license", "max_line_length": 594, "num_lines": 185, "path": "/README.rst", "repo_name": "embray/tracext-redispub", "src_encoding": "UTF-8", "text": "Trac Redis Pub\n==============\n`Trac <https://trac.edgewall.org/>`_ plugin providing `Redis pub/sub\nchannels <https://redis.io/topics/pubsub>`_ for Trac events, specifically\nticket and wiki page creation/updates.\n\nThis can be used to implement services external to the Trac web server\nitself that react to events on a Trac project. 
For example, one can provide\na stream of ticket events like:\n\n.. code:: python\n\n >>> import redis\n >>> client = redis.Redis()\n >>> pubsub = client.pubsub()\n >>> pubsub.psubscribe('trac.*')\n >>> for event in pubsub.listen():\n ... print(event)\n ...\n {'pattern': None, 'type': 'psubscribe', 'channel': 'trac.*', 'data': 1L}\n {'pattern': 'trac.*', 'type': 'pmessage', 'channel': 'trac.test.wiki.created', 'data': '{\"comment\": \"Change comment\", \"name\": \"NewWikiPage\", \"author\": \"anonymous\", \"text\": \"Page contents.\", \"readonly\": 0, \"version\": 1, \"time\": \"2018-03-02T14:10:22.844985+00:00\"}'}\n {'pattern': 'trac.*', 'type': 'pmessage', 'channel': 'trac.test.wiki.changed.NewWikiPage', 'data': '{\"comment\": \"Editing page.\", \"name\": \"NewWikiPage\", \"author\": \"anonymous\", \"text\": \"Page contents.\\\\r\\\\nAdditional contents.\", \"old_text\": \"Page contents.\", \"readonly\": 0, \"version\": 2, \"time\": \"2018-03-02T14:10:36.192988+00:00\"}'}\n {'pattern': 'trac.*', 'type': 'pmessage', 'channel': 'trac.test.ticket.created', 'data': '{\"status\": \"new\", \"changetime\": \"2018-03-02T14:15:01.401989+00:00\", \"reporter\": \"anonymous\", \"cc\": \"\", \"milestone\": \"\", \"component\": \"component1\", \"keywords\": \"\", \"owner\": \"somebody\", \"id\": 17, \"description\": \"Problem description.\", \"author\": \"\", \"summary\": \"Test ticket\", \"priority\": \"major\", \"version\": \"\", \"time\": \"2018-03-02T14:15:01.401989+00:00\", \"type\": \"defect\"}'}\n {'pattern': 'trac.*', 'type': 'pmessage', 'channel': 'trac.test.ticket.changed.17', 'data': '{\"comment\": \"Updated milestone.\", \"new_values\": {\"status\": \"new\", \"changetime\": \"2018-03-02T14:15:01.401989+00:00\", \"reporter\": \"anonymous\", \"cc\": \"\", \"milestone\": \"milestone1\", \"component\": \"component1\", \"keywords\": \"\", \"owner\": \"somebody\", \"id\": 17, \"description\": \"Problem description.\", \"author\": \"\", \"summary\": \"Test ticket\", \"priority\": \"major\", \"version\": \"\", \"time\": \"2018-03-02T14:15:01.401989+00:00\", \"type\": \"defect\"}, \"id\": 17, \"old_values\": {\"milestone\": \"\"}, \"author\": \"anonymous\"}'}\n \n\n.. note::\n\n Version 0.1 supports basic ticket and wiki-related events. However,\n there are many other types of events in Trac that could be monitored\n this way, for which support may be added in future versions in the\n remote chance there is any demand.\n\n\nInstallation and Configuration\n------------------------------\n\nThe plugin can be installed in the standard way using ``pip``::\n\n $ pip install tracext-redispub\n\nTo enable the plugin in Trac, add the following to the ``[components]``\nsection of your ``trac.ini``:\n\n.. code:: ini\n\n [components]\n tracext.redispub.redis.redisclient = enabled\n tracext.redispub.ticket.redisticketstream = enabled\n tracext.redispub.wiki.rediswikistream = enabled\n\nThe ``tracext.redispub.redis.redisclient`` component must be enabled for the\nother components to work. The other components may be enabled or disabled\nas desired (e.g. to disable all wiki channels use ``tracext.redispub.wiki.*\n= disabled``).\n\nThe plugin is further configured through two additional configuration\nsections. The first section is ``[redis_client]`` which is used for\nconfiguring how to connect to the Redis server. Currently this only takes a\nsmall number of options--later it will be expanded to the fuller range of\noptions for configuring your Redis connection:\n\n.. 
code:: ini\n\n [redis_client]\n # hostname of the Redis server\n host = localhost\n # Redis server port\n port = 6379\n # Redis DB number (although this can be configured, it is irrelevant for\n # pub/sub purposes)\n db = 0\n # Redis server password\n password =\n # Path to UNIX socket to connect over instead of through TCP\n unix_socket_path =\n\nOne additional section configures the behavior of the plugin itself (and\ncurrently has only one option):\n\n.. code:: ini\n\n [redispub]\n # This string is prefixed to the names of all channels published to by\n # the plugin\n channel_prefix = trac\n\n\nAvailable Channels\n------------------\n\nThe following channels can be subscribed to:\n\nTicket channels\n^^^^^^^^^^^^^^^\n\n* Ticket creation events are published to the\n ``<prefix>.<env>.ticket.created`` channel, where ``<prefix>`` is the\n configurable channel name prefix (e.g. 'trac'), and ``<env>`` is the\n environment name.\n\n Messages on this channel consist of the field values of the created ticket\n (along with the ticket ID) as a JSON-encoded dictionary:\n\n .. code:: json\n \n {\n \"id\": 1,\n \"summary\": \"...\",\n \"description\": \"...\",\n ...\n }\n\n* Ticket change events are published to the\n ``<prefix>.<env>.ticket.changed.<id>`` channel, where ``<prefix>`` and\n ``<env>`` are as before, and ``<id>`` is the ticket ID. This allows\n subscribing just to the changes on a specific ticket, if desired. The\n message is a JSON-encoded dictionary with the following format:\n\n .. code:: json\n \n {\n \"id\": 1,\n \"new_values\": { ... },\n \"old_values\": { ... },\n \"author\": \"somebody\",\n \"comment\": \"A comment...\"\n }\n\n Where ``\"id\"`` is the ticket ID. ``\"new_values\"`` maps field names to\n their new values (including fields that did not change), and\n ``\"old_values\"`` maps field names to the previous values of fields that\n changed. ``\"author\"`` is the author of the change, and ``\"comment\"`` is\n the comment associated with the change (which may be blank).\n\n* Ticket deletion events are published to the\n ``<prefix>.<env>.ticket.deleted`` channel. These events have the same\n format as ticket creation events, and include the values of all the fields\n on the just-deleted ticket.\n\nWiki channels\n^^^^^^^^^^^^^\n\n* Wiki page creation events are published to the\n ``<prefix>.<env>.wiki.created`` channel, where ``<prefix>`` is the\n configurable channel name prefix (e.g. 'trac'), and ``<env>`` is the\n environment name.\n\n Messages on this channel are JSON-encoded dictionaries representing the\n wiki page (``\"version\"`` is always ``1`` for new pages):\n\n .. code:: json\n\n {\n \"name\": \"PageName\",\n \"version\": 1,\n \"time\": \"2018-03-02T12:31:28.184283\",\n \"author\": \"somebody\",\n \"text\": \"...full page text...\",\n \"comment\": \"edit comment, if any\",\n \"readonly\": 0\n }\n\n* Wiki change events are published to the\n ``<prefix>.<env>.wiki.changed.<name>`` channel, where ``<prefix>`` and\n ``<env>`` are as before, and ``<name>`` is the wiki page name. This\n allows subscribing just to the changes on a specific wiki page, if\n desired. The message is a JSON-encoded dictionary with the same format as\n the wiki created event, along with an additional ``\"old_text\"`` property\n containing the previous text of the wiki page before the change:\n \n .. 
code:: json\n\n {\n \"name\": \"PageName\",\n \"version\": 2,\n ...\n \"text\": \"the new text of the page\",\n \"old_text\": \"the old text of the page\",\n ...\n }\n\n* Wiki deletion events are published to the ``<prefix>.<env>.wiki.deleted``\n channel. These events have the same format as wiki creation events, and\n include the properties of the just-deleted wiki page.\n\n" }, { "alpha_fraction": 0.5706790089607239, "alphanum_fraction": 0.5712962746620178, "avg_line_length": 35.60439682006836, "blob_id": "d5202508d30af33effdb3b36fe2f02b0e17ef026", "content_id": "8bfd80de5365db8e5d78610993b44cad109345e4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3240, "license_type": "no_license", "max_line_length": 79, "num_lines": 91, "path": "/tracext/redispub/ticket.py", "repo_name": "embray/tracext-redispub", "src_encoding": "UTF-8", "text": "\"\"\"Ticket event handling.\"\"\"\n\nfrom __future__ import absolute_import\n\n\nfrom trac.core import implements\nfrom trac.ticket.api import ITicketChangeListener\n\nfrom .redis import RedisComponent\nfrom .util import dumps\n\n\nclass RedisTicketStream(RedisComponent):\n \"\"\"\n Listens for ticket creation/change/deletion events and publishes\n them to the relevant channels:\n\n * Ticket creation events are published to the\n ``<prefix>.<env>.ticket.created`` channel, where ``<prefix>`` is the\n configurable channel name prefix (e.g. 'trac'), and ``<env>`` is the\n environment name.\n\n Messages on this channel consist of the field values of the created\n ticket (along with the ticket ID) as a JSON-encoded dictionary::\n\n {\n \"id\": 1,\n \"summary\": \"...\",\n \"description\": \"...\",\n ...\n }\n\n * Ticket change events are published to the\n ``<prefix>.<env>.ticket.changed.<id>`` channel, where ``<prefix>``\n and ``<env>`` are as before, and ``<id>`` is the ticket ID. This\n allows subscribing just to the changes on a specific ticket, if\n desired. The message is a JSON-encoded dictionary with the following\n format::\n\n {\n \"id\": 1,\n \"new_values\": { ... },\n \"old_values\": { ... },\n \"author\": \"somebody\",\n \"comment\": \"A comment...\"\n }\n\n Where ``\"id\"`` is the ticket ID. ``\"new_values\"`` maps field names to\n their new values (including fields that did not change), and\n ``\"old_values\"`` maps field names to the previous values of fields\n that changed. ``\"author\"`` is the author of the change, and\n ``\"comment\"`` is the comment associated with the change (which may be\n blank).\n\n * Ticket deletion events are published to the\n ``<prefix>.<env>.ticket.deleted`` channel. 
These events have the\n same format as ticket creation events, and include the values of all\n the fields on the just-deleted ticket.\n \"\"\"\n\n implements(ITicketChangeListener)\n\n _realm = 'ticket'\n\n # ITicketChangeListener methods\n def ticket_created(self, ticket):\n data = dict(ticket.values)\n data['id'] = ticket.id\n self.redis.publish(self._channel_name('created'), dumps(data))\n\n def ticket_changed(self, ticket, comment, author, old_values):\n data = {\n 'new_values': ticket.values,\n 'old_values': old_values,\n 'author': author,\n 'comment': comment,\n 'id': ticket.id,\n }\n self.redis.publish(self._channel_name('changed', ticket.id),\n dumps(data))\n\n def ticket_deleted(self, ticket):\n data = dict(ticket.values)\n data['id'] = ticket.id\n self.redis.publish(self._channel_name('deleted'), dumps(data))\n\n def _channel_name(self, method, ticket_id=None):\n channel = super(RedisTicketStream, self)._channel_name(method)\n if ticket_id is not None:\n channel += '.' + str(ticket_id)\n return channel\n" }, { "alpha_fraction": 0.5269147753715515, "alphanum_fraction": 0.5306059718132019, "avg_line_length": 29.101852416992188, "blob_id": "cd25b445ca36e543bd2f68cf016954985083ce1c", "content_id": "3dcf7c93ce9eae576819424cb0e993fa605b6a97", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3251, "license_type": "no_license", "max_line_length": 78, "num_lines": 108, "path": "/tracext/redispub/redis.py", "repo_name": "embray/tracext-redispub", "src_encoding": "UTF-8", "text": "\"\"\"Core Redis client component.\"\"\"\n\nfrom __future__ import absolute_import\n\n\nfrom trac.config import Option, IntOption, PathOption\nfrom trac.core import Component, TracError\n\nimport redis\n\n\nclass RedisClient(Component):\n \"\"\"\n A mini-component for managing a `redis.Redis` client instance (and its\n underlying connection pool) for use by other components that use Redis, as\n well as options passed to the client.\n \"\"\"\n\n _config_section = 'redis_client'\n\n _redis_client_options = {\n 'host': 'localhost',\n 'port': 6379,\n 'db': 0,\n 'password': None,\n 'unix_socket_path': (None, 'path')\n }\n\n\n _type_to_option_getter = {\n type(None): 'get',\n str: 'get',\n int: 'getint',\n 'path': 'getpath'\n }\n\n def __init__(self):\n super(RedisClient, self).__init__()\n self.redis = redis.Redis(**self._client_options())\n\n def _client_options(self):\n \"\"\"\n Read supported arguments to `redis.Redis` from the Trac config.\n \"\"\"\n\n options = {}\n for key, value in self._redis_client_options.items():\n if isinstance(value, tuple):\n if len(value) == 1:\n default = value[0]\n type_ = None\n elif len(value) == 2:\n default, type_ = value\n else:\n default = value\n type_ = None\n\n if type_ is None:\n # Infer type from the default type\n type_ = type(default)\n\n getter = getattr(self.env.config,\n self._type_to_option_getter[type_])\n options[key] = getter(self._config_section, key, default)\n\n return options\n\n\nclass RedisComponent(Component):\n \"\"\"\n A simple base class for `Component`s that provides a ``.redis`` property\n returning the `Redis` client instance of the `RedisClient` component.\n \"\"\"\n\n channel_prefix = Option('redispub', 'channel_prefix', 'trac',\n doc=\"Prefix to use for channels published to by \"\n \"components of this plug-in. All other \"\n \"channels are dotted with this name; e.g. \"\n \"trac.<envname>.ticket.created\")\n\n _realm = None\n \"\"\"\n The realm (e.g. 
'ticket', 'wiki') handled by subclasses of this component.\n \"\"\"\n\n def __init__(self):\n if not self.env.is_enabled(RedisClient):\n raise TracError(\n \"The {0}.{1} component must be enabled in order to use the \"\n \"{0}.{2} component.\".format(__name__, RedisClient.__name__,\n self.__class__.__name__))\n\n super(RedisComponent, self).__init__()\n\n @property\n def redis(self):\n \"\"\"Returns the `Redis` client instance.\"\"\"\n\n return self.env[RedisClient].redis\n\n def _channel_name(self, method):\n channel = method\n if self._realm is not None:\n channel = self._realm + '.' + channel\n channel = self.env.name + '.' + channel\n if self.channel_prefix:\n channel = self.channel_prefix + '.' + channel\n return channel\n" }, { "alpha_fraction": 0.6242424249649048, "alphanum_fraction": 0.6606060862541199, "avg_line_length": 24.384614944458008, "blob_id": "0c311989c8f89d13443c933e10e843bc9583b61b", "content_id": "d9fd010bc47609551d988bde723f2aecaeae8acd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 330, "license_type": "no_license", "max_line_length": 75, "num_lines": 13, "path": "/CHANGES.rst", "repo_name": "embray/tracext-redispub", "src_encoding": "UTF-8", "text": "0.2 (unreleased)\n================\n\n* Changed the ``ticket.changed`` channel so that the ``\"new_values\"``\n keyword contains all of the changed ticket's fields, including those that\n did not change. This allows more flexibility in formatting the ticket\n changed messages.\n\n\n0.1 (03/02/2018)\n================\n\n* Initial release.\n" } ]
7
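A minimal consumer sketch for the channels published by the tracext-redispub plugin above, assuming a local Redis, the default 'trac' prefix, and an environment named 'test' (the handler logic is illustrative, not part of the plugin):

    import json
    import redis

    pubsub = redis.Redis().pubsub()
    # Pattern-subscribe to every ticket change event in the 'test' environment.
    pubsub.psubscribe('trac.test.ticket.changed.*')

    for event in pubsub.listen():
        if event['type'] != 'pmessage':
            continue  # skip the initial psubscribe confirmation
        payload = json.loads(event['data'])
        # Per the README above, 'old_values' holds only the fields that changed.
        print('ticket #%s changed by %s: %s' % (
            payload['id'], payload['author'],
            ', '.join(payload['old_values'])))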
chunlinyao/yao-blog
https://github.com/chunlinyao/yao-blog
809c581a2dfd590c189d2cb2bfdd30b6d991a664
3cadea8cf99cbf958493747415cfc6046d5da4e4
9f15d64133de042876baa7ea1a9afa0314bd83ac
refs/heads/master
2021-01-17T12:10:39.511457
2010-09-07T06:55:18
2010-09-07T06:55:18
32,187,358
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5731818079948425, "alphanum_fraction": 0.5831817984580994, "avg_line_length": 33.45161437988281, "blob_id": "18d912d54f3f76a09b7abab359da69794022aedb", "content_id": "6131f0556f6f74fa040e66ffaab6906a0163a22e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2200, "license_type": "no_license", "max_line_length": 92, "num_lines": 62, "path": "/gae_render.py", "repo_name": "chunlinyao/yao-blog", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\r\nimport logging\r\nfrom jinja2 import FileSystemLoader\r\nfrom google.appengine.api import memcache\r\n\r\nclass render_jinja:\r\n \"\"\"Rendering interface to Jinja2 Templates\r\n \r\n Example:\r\n\r\n render= render_jinja('templates')\r\n render.hello(name='jinja2')\r\n \"\"\"\r\n def __init__(self, *a, **kwargs):\r\n extensions = kwargs.pop('extensions', [])\r\n globals = kwargs.pop('globals', {})\r\n\r\n from jinja2 import Environment,FileSystemLoader\r\n self._lookup = Environment(loader=PythonLoader(*a, **kwargs), extensions=extensions)\r\n self._lookup.globals.update(globals)\r\n \r\n def __getattr__(self, name):\r\n # Assuming all templates end with .html\r\n path = name + '.html'\r\n t = self._lookup.get_template(path)\r\n return t.render\r\ntry:\r\n mydata\r\nexcept NameError:\r\n logging.error(\"create jinja2 cache map\")\r\n mydata = {}\r\nimport base64\r\ndef get_data_by_name(name):\r\n if base64.b64encode(name) in mydata:\r\n return mydata[base64.b64encode(name)]\r\n return None\r\nclass PythonLoader(FileSystemLoader):\r\n \"\"\"A Jinja2 loader that loads pre-compiled templates.\"\"\"\r\n def load(self, environment, name, globals=None):\r\n \"\"\"Loads a Python code template.\"\"\"\r\n if globals is None:\r\n globals = {}\r\n #try for a variable cache\r\n code = get_data_by_name(name)\r\n if code is not None:\r\n logging.info(\"find in hashmap\")\r\n else:\r\n logging.info(\"slow memcache\")\r\n code = memcache.get(name)\r\n if code is None:\r\n logging.info(\"oops no memcache!!\")\r\n source, filename, uptodate = self.get_source(environment, name)\r\n template = file(filename).read().decode('utf-8')\r\n code = environment.compile(template, raw=True)\r\n memcache.set(name,code)\r\n logging.info(name)\r\n else:\r\n logging.info(\"hit memcache\")\r\n code = compile(code, name, 'exec')\r\n mydata[base64.b64encode(name)] = code\r\n return environment.template_class.from_code(environment, code,globals)\r\n\r\n" }, { "alpha_fraction": 0.563173770904541, "alphanum_fraction": 0.5681328177452087, "avg_line_length": 30.88965606689453, "blob_id": "2b0fa66661b461da53b587ceec40728fecd53adc", "content_id": "5d4573a583ecdbc0199751de4d8b1ea5f457147f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4638, "license_type": "no_license", "max_line_length": 102, "num_lines": 145, "path": "/data.py", "repo_name": "chunlinyao/yao-blog", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom google.appengine.api import users\nfrom google.appengine.ext import db\nimport urllib,string\nfrom utils import slugify, versionate, markdown\n\nclass Entry(db.Model):\n \"\"\"A single blog entry.\"\"\"\n author = db.UserProperty()\n title = db.StringProperty(required=True)\n slug = db.StringProperty(required=True)\n body = db.TextProperty(required=True)\n markdown = db.TextProperty(required=True)\n published = db.DateTimeProperty(auto_now_add=True)\n updated = db.DateTimeProperty(auto_now=True)\n tag_str = 
db.StringProperty(required=False,default=\"\") \n tags = db.ListProperty(db.Category)\n\n @property\n def tagurls(self):\n def encodeurl(x):\n x.url = urllib.quote(x.encode('utf-8'), safe='') \n return x\t\n return map(encodeurl, self.tags)\n \n def slugurl(self):\n return urllib.quote(self.slug.encode('utf-8'), safe='') \t\n\n def idurl(self):\n return urllib.quote(str(self.key().id()), safe='') \t\n \n def validate_tag_str(self):\n \"\"\" \"\"\"\n tag_str = self.tag_str\n if not type(tag_str) in [unicode, str]:\n raise ValueError('Passed tag_str must be of type string or unicode. not %s'%type(tag_str))\n\n tag_list = tag_str.split(',')\n tag_list = map(string.strip, tag_list)\n #tag_list = map(string.lowercase, tag_list) \n tag_list = {}.fromkeys(tag_list).keys()\n # Example: ['ai', 'computer science', 'lisp', '']\n # This removes that empty string\n try: tag_list.remove('')\n except: pass\n tag_list.sort()\n # Return list as an array of db.Category items\n # Example: [db.Category('ai'), db.Category('computer science'), db.Category('lisp')]\n return map(db.Category, tag_list) \n \n def update_tags(self):\n \"\"\"Update Tag cloud info\"\"\"\n oldtags = self.tags or []\n newtags = self.validate_tag_str() or []\n addtags = [x for x in newtags if (x not in oldtags) ]\n removetags = [x for x in oldtags if (x not in newtags) ]\n for tag_ in addtags:\n #tag_ = tag.encode('utf8')\n tags = Tag.all().filter('tag',tag_).fetch(10)\n if tags == []:\n tagnew = Tag(tag=tag_,entrycount=1)\n tagnew.put()\n else:\n tags[0].entrycount+=1\n tags[0].put() \n for tag_ in removetags:\n #tag_ = tag.encode('utf8')\n tags = Tag.all().filter('tag',tag_).fetch(10)\n if tags != []:\n tags[0].entrycount-=1\n if tags[0].entrycount == 0 :\n tags[0].delete()\n else:\n tags[0].put()\n self.tags = newtags;\n \nclass Tag(db.Model):\n tag = db.StringProperty(multiline=False)\n entrycount = db.IntegerProperty(default=0)\n valid = db.BooleanProperty(default = True)\n def tagurl(self):\n return urllib.quote(self.tag.encode('utf-8'), safe='')\n\n @staticmethod\n def entries_by_tag(tag):\n return Entry.all().filter('tags =', db.Category(tag)).order('-published')\n \n @staticmethod\n def taglist():\n return Tag.all().filter('valid =', True).order('-entrycount')\n \ndef all_entries():\n return Entry.all()\n\ndef last_updated():\n import datetime\n last = db.Query(Entry).order('-published').get()\n return last.updated if last else datetime.datetime.now()\n \n\ndef latest_entries(limit=10):\n return db.Query(Entry).order('-published').fetch(limit=limit)\n\n\ndef entry_by_slug(slug):\n '''We're assuming all entries contain a unique slug.'''\n return db.Query(Entry).filter(u'slug =', slug).get()\n\ndef entry_by_id(id):\n return Entry.get_by_id(long(id))\n\ndef exists_entry(slug):\n q = db.Query(Entry).filter(u'slug =', slug).get()\n return q is not None\n\ndef update_entry(slug, i):\n entry = entry_by_slug(slug)\n if entry:\n entry.title = i.title\n entry.markdown = i.markdown\n entry.body = markdown(i.markdown)\n entry.tag_str = i.tag_str\n entry.update_tags()\n entry.put()\n return entry\n\n\ndef insert_entry(entry):\n slug = slugify(entry.title)\n while exists_entry(slug):\n slug = versionate(slug) \n\n entry = Entry(\n author=users.get_current_user(),\n title=entry.title,\n body=markdown(entry.markdown),\n slug=slug,\n markdown=entry.markdown,\n tag_str = entry.tag_str\n )\n entry.update_tags()\n entry.put()\n \n return entry\n \n\n" }, { "alpha_fraction": 0.5851528644561768, "alphanum_fraction": 0.6026200652122498, 
"avg_line_length": 24.44444465637207, "blob_id": "8141aee20b019d0b8a0e10ed864dcd80a342b94b", "content_id": "0e0605fc50edf7ea4157e4ef9ef47c02db0ccdfc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 279, "license_type": "no_license", "max_line_length": 49, "num_lines": 9, "path": "/templates/about.html", "repo_name": "chunlinyao/yao-blog", "src_encoding": "UTF-8", "text": "{% extends \"sidebar.html\" %}\n\n{% block title %}自我介绍{% endblock %}\n\n{% block content %}\n <h1 style=\"margin-bottom:6pt\">自我介绍</h1>\n <p>搞了十几年软件。现在在从事Java开发,J2EE。</p>\n <p><a href=\"/\">&laquo; Back to the blog</a></p>\n{% endblock %}\n" }, { "alpha_fraction": 0.5652173757553101, "alphanum_fraction": 0.570652186870575, "avg_line_length": 16.399999618530273, "blob_id": "39d3e3b8bb11670bbbd6debf4a05fbcac169778b", "content_id": "1a6b1a013061139d9e5a144872a863f83afd7c01", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 184, "license_type": "no_license", "max_line_length": 26, "num_lines": 10, "path": "/forms.py", "repo_name": "chunlinyao/yao-blog", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\r\n# -*- coding: utf-8 -*-\r\n\r\nfrom lib.web import form\r\n\r\nnew_post = form.Form(\r\n form.Textbox('title'),\r\n form.Textarea('text'),\r\n form.Button('Submit!')\r\n)\r\n" }, { "alpha_fraction": 0.6040853261947632, "alphanum_fraction": 0.6046860814094543, "avg_line_length": 24.030075073242188, "blob_id": "95a025d02e2a817593f03f29ba9c16207fbc0ac3", "content_id": "ad842c573bf2ff886cc0c3e7cbab3fb4dab5389d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3329, "license_type": "no_license", "max_line_length": 82, "num_lines": 133, "path": "/main.py", "repo_name": "chunlinyao/yao-blog", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport sys\n\nsys.path.insert(0, 'lib/lib.zip')\n\nimport web, os\nimport data\nfrom auth import requires_admin\nimport datetime\nfrom google.appengine.api import users\n\nurls = (\n '/', 'Home',\n '/index.html', 'Home',\n '/index.htm', 'Home',\n '/entry/(.*)', 'Entry',\n '/qr/(.*)', 'QREntry',\n '/archive', 'Archive',\n '/about', 'About',\n '/feed', 'Feed',\n '/atom', 'Feed',\n '/compose', 'Post',\n '/clear-cache', 'ClearCache',\n '/tag/(.*)', 'Tag',\n '/sitemap.xml', 'SiteMap',\n)\n\nsettings = {\n \"blog_title\": u\"Yao's blog\",\n \"debug\": os.environ.get(\"SERVER_SOFTWARE\", \"\").startswith(\"Development/\"),\n}\nif settings['debug']:\n from web.contrib.template import render_jinja\nelse:\n from gae_render import render_jinja\n\nrender = render_jinja('templates')\nfrom datetimeformat import datetimeformat\nrender._lookup.filters['datetimeformat'] = datetimeformat\n\nimport urllib\ndef urlencode(value):\n return urllib.quote(value, safe='')\nrender._lookup.filters['urlencode'] = urlencode\n\ndef tags_list():\n return data.Tag.taglist()\n \nclass Home:\n def GET(self):\n entries = data.latest_entries()\n return render.home(entries=entries, **globals())\n\nclass Post:\n @requires_admin\n def GET(self):\n i = web.input(key=None)\n \n import logging\n entry = data.entry_by_slug(i.key) if i.key else None\n return render.compose(entry=entry,**globals())\n\n @requires_admin\n def POST(self):\n i = web.input(key=None,title=None,markdown=None,tag_str=None)\n if i.key:\n if data.exists_entry(i.key):\n entry = data.update_entry(i.key, i)\n else:\n return web.seeother(\"/\")\n else:\n entry = 
data.insert_entry(i)\n return web.seeother(\"/entry/\" + entry.slugurl())\n\n\nclass Entry:\n def GET(self, slug):\n entry = data.entry_by_slug(slug)\n if entry is None:\n raise web.notfound()\n return render.entry(entry=entry,**globals())\n\nclass QREntry:\n def GET(self, id):\n entry = data.entry_by_id(id)\n if entry is None:\n raise web.notfound()\n return render.entry(entry=entry,**globals())\n\n\nclass Archive:\n def GET(self):\n entries = data.all_entries()\n return render.archive(entries=entries,**globals())\n\nclass Feed:\n def GET(self):\n web.header('Content-Type', 'application/atom+xml')\n entries = data.latest_entries()\n if(entries):\n last_updated = max(e.updated for e in entries)\n else:\n last_updated = datetime.datetime.utcnow()\n return render.feed(last_updated=last_updated, entries=entries,**globals())\n\nclass About:\n def GET(self):\n return render.about(**globals())\n\nclass ClearCache:\n def GET(self):\n from google.appengine.api import memcache\n memcache.flush_all()\n return \"Memcache flushed.\" \n\nclass Tag:\n def GET(self, tag):\n entries = data.Tag.entries_by_tag(tag)\n return render.archive(entries=entries, tagname=tag, **globals())\n\nclass SiteMap:\n def GET(self):\n web.header('Content-Type', 'text/xml')\n return render.sitemap(**globals())\n\napp = web.application(urls, globals())\n\ndef main():\n app.cgirun()\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.591549277305603, "alphanum_fraction": 0.5962441563606262, "avg_line_length": 40.599998474121094, "blob_id": "bc98a1647e8d8a6214ea6d5e8ba0d0c221d23b26", "content_id": "baf1bfd4af518074851d3f0e1bb91217b18d4734", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 263, "license_type": "no_license", "max_line_length": 88, "num_lines": 5, "path": "/datetimeformat.py", "repo_name": "chunlinyao/yao-blog", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\r\ndef datetimeformat(value):\r\n weekday = (u\"星期一\",u\"星期二\",u\"星期三\",u\"星期四\",u\"星期五\",u\"星期六\",u\"星期日\",)\r\n return u\"%s %s年%s月%s日\" % (weekday[value.weekday()],value.year,value.month,value.day)\r\n" }, { "alpha_fraction": 0.5777669548988342, "alphanum_fraction": 0.5821550488471985, "avg_line_length": 29.875, "blob_id": "3963702c27806d03cdb7880e6d2b1055be19def1", "content_id": "39f31f3e87f1ae0f91d5dd8beb2c71f08cc2a7cb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2051, "license_type": "no_license", "max_line_length": 85, "num_lines": 64, "path": "/utils.py", "repo_name": "chunlinyao/yao-blog", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\r\n# -*- coding: utf-8 -*-\r\n\r\nimport os\r\nimport re,unicodedata\r\nfrom markdown import markdown as md\r\r\ndef markdown(text):\r\n return md(text)\r\n\r\ndef timestamp(d=None):\r\n '''returns a string representing a given datetime up to the microsecond.\r\n Couldnt find a way to use strftime up to that precision'''\r\n import datetime\r\n date = d or datetime.datetime.now()\r\n microseconds = date.isoformat().split('.')[-1]\r\n return ''.join([datetime.datetime.strftime(date, '%Y%m%d%H%M%S'), microseconds])\r\n\r\ndef slugify(s):\r\n \"\"\"Convert some string to a url-friendly name.\"\"\"\r\n #islug = unicodedata.normalize(\"NFKD\", s).encode(\r\n # \"ascii\", \"ignore\")\r\n #slug = re.sub(u\"[^\\w]+\", u\" \", s)\r\n slug = u\"-\".join(s.lower().strip().split())\r\n if not slug: slug = \"entry\"\r\n return slug\r\n\r\ndef versionate(s):\r\n \"\"\"\r\n Assumes 
s is a slug-type string.\r\n Returns another slug-type string with a number at the end.\r\n Useful when you want unique slugs that may have been hashed to the same string.\r\n \"\"\"\r\n words = s.split(\"-\")\r\n try:\r\n # Check if the last element is a number. If no exception, it is.\r\n # We'll substitute the number on the slug\r\n num = int(words[-1])\r\n words[-1] = str(num+1)\r\n except ValueError:\r\n # Not a number (single-word slugs also land here). We'll append\r\n # the number 1 to create a new version.\r\n words.append('1')\r\n \r\n return '-'.join(words)\r\n\r\n\r\ndef save_uploaded_file(f, **kw):\r\n import datetime\r\n \r\n name = kw.pop('name', None) \r\n folder = kw.pop('folder', './')\r\n\r\n uploaded_filename = str(f)\r\n extension = ('.' in uploaded_filename and uploaded_filename.split('.')[-1]) or ''\r\n \r\n filename = str(name) if name is not None else '.'.join([timestamp(), extension])\r\n \r\n destination = open(os.path.join(folder, filename), 'w')\r\n for chunk in f.chunks():\r\n destination.write(chunk)\r\n destination.close()\r\n \r\n return filename\r\n \r\n \r\n" }, { "alpha_fraction": 0.5757071375846863, "alphanum_fraction": 0.5773710608482361, "avg_line_length": 33.411766052246094, "blob_id": "4470d3128f2a4360d2e79611329605255f174a0b", "content_id": "6bc49abd3f5fa9cf0b74ce17d039eac2c183145f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 601, "license_type": "no_license", "max_line_length": 76, "num_lines": 17, "path": "/auth.py", "repo_name": "chunlinyao/yao-blog", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport web\r\nfrom google.appengine.api import users\r\n\r\ndef requires_admin(method):\r\n \"\"\"Decorate with this method to restrict to site admins.\"\"\"\r\n def wrapper(self, *args, **kwargs):\r\n user = users.get_current_user()\r\n if not user:\r\n if web.ctx.method == \"GET\":\r\n raise web.seeother(users.create_login_url(web.ctx.fullpath))\r\n raise web.forbidden()\r\n elif not (users.is_current_user_admin()):\r\n raise web.forbidden()\r\n else:\r\n return method(self, *args, **kwargs)\r\n return wrapper\r\n" } ]
8
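The slug handling in the chunlinyao/yao-blog row above (data.insert_entry together with utils.slugify and utils.versionate) keeps bumping a trailing number until no existing entry claims the slug. A small standalone sketch of that loop, with a hypothetical set standing in for data.exists_entry:

    def versionate(s):
        # Same idea as utils.versionate: bump a trailing number,
        # or append '-1' when there is none.
        words = s.split('-')
        try:
            words[-1] = str(int(words[-1]) + 1)
        except ValueError:
            words.append('1')
        return '-'.join(words)

    taken = {'hello-world', 'hello-world-1'}  # hypothetical existing slugs
    slug = 'hello-world'
    while slug in taken:  # stands in for data.exists_entry(slug)
        slug = versionate(slug)
    print(slug)  # -> hello-world-2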
aizoule/piko-san
https://github.com/aizoule/piko-san
073bef67e70647aa47d5947ef541352d29a8b369
130dd779270cd2b827cd98218abef95cb9b5fcc2
6ac3fe082bda2a40b0c60f601cc4eed94aabad7c
refs/heads/master
2018-11-27T17:04:21.640133
2018-11-04T16:16:33
2018-11-04T16:16:33
144,200,285
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5979428291320801, "alphanum_fraction": 0.6115487217903137, "avg_line_length": 44.99101638793945, "blob_id": "061b78467b3866c99b8e237c91af4a979aa61b90", "content_id": "b60f13438379a3732ba1144af71e66e12fb127e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15492, "license_type": "no_license", "max_line_length": 553, "num_lines": 334, "path": "/bot.py", "repo_name": "aizoule/piko-san", "src_encoding": "UTF-8", "text": "import random\nimport asyncio\nimport aiohttp\nimport urllib3\nimport urllib\nimport Pymoe\nimport json\nimport discord\nimport datetime\nimport requests as rq\nfrom discord import Game\nfrom discord.ext.commands import Bot\nfrom discord.ext import commands\nimport time\nimport scrapy\n\n\nBOT_PREFIX = (\".\", \"!\")\nTOKEN = \"NDUzOTg2NDcyNDAwMjU3MDI1.DgSx8w.U3Hce199i5q9yL4S7HhoeEzEZFE\" # Get at discordapp.com/developers/applications/me\n\nclient = Bot(command_prefix=BOT_PREFIX)\nAn = Pymoe.Anilist()\n\n@client.command(name='8ball',\n description=\"Answers a yes/no question.\",\n brief=\"Answers from the beyond.\",\n aliases=['eight_ball', 'eightball', '8-ball'],\n pass_context=True)\nasync def eight_ball(context):\n possible_responses = [\n 'HAYIR',\n 'Olacakmış gibi durmuyor',\n 'Söylemesi zor',\n 'Olabilir',\n 'Kesinlikle',\n ]\n await client.say(random.choice(possible_responses) + \", \" + context.message.author.mention)\n\n\n@client.command()\nasync def square(number):\n squared_value = int(number) * int(number)\n await client.say(str(number) + \" squared is \" + str(squared_value))\n\nclass Anime:\n\n @client.command(pass_context=True)\n async def anime(ctx):\n try:\n virgul = \", \"\n new_msg = ctx.message.content[7:]\n search = An.search.anime(new_msg)\n ide = search['data']['Page']['media'][0]['id']\n episodes = search['data']['Page']['media'][0]['episodes']\n episodes_dumps = json.dumps(episodes)\n if episodes_dumps == \"null\":\n episodes = \"Currently airing or unknown\"\n else:\n episodes = episodes\n animename = search['data']['Page']['media'][0]['title']['romaji']\n animename_en = search['data']['Page']['media'][0]['title']['english']\n animename_en_dumps = json.dumps(animename_en)\n rank = search['data']['Page']['media'][0]['popularity']\n score = str(search['data']['Page']['media'][0]['averageScore']) + \"%\"\n thumb = search['data']['Page']['media'][0]['coverImage']['large']\n season = search['data']['Page']['media'][0]['season']\n id = An.get.anime(ide)\n b = json.dumps(id)\n c = json.loads(b)\n d = c['data']['Media']['description']\n summary = d.replace(\"<br>\", \"\")\n img = id['data']['Media']['bannerImage']\n g = id['data']['Media']['genres']\n genres = virgul.join(g)\n s = id['data']['Media']['synonyms']\n synonyms = virgul.join(s)\n embed = discord.Embed(title=str(animename) + \" (English: \" + str(animename_en) + \")\", color=0x2a9ff, url=\"https://anilist.co/anime/{}\".format(ide))\n if animename_en_dumps == \"null\" or animename_en_dumps == animename:\n embed = discord.Embed(title=animename, color=0x2a9ff, url=\"https://anilist.co/anime/{}\".format(ide))\n else:\n animename_en = animename_en\n embed.set_author(name=ctx.message.author.name, icon_url=ctx.message.author.avatar_url)\n embed.set_thumbnail(url=thumb)\n embed.set_image(url=img)\n embed.add_field(name=\"Summary\", value=summary)\n embed.add_field(name=\"Episodes\", value=episodes, inline=True)\n embed.add_field(name=\"Rank\", value=rank, inline=True)\n embed.add_field(name=\"Average Score\", value=score, 
inline=True)\n embed.add_field(name=\"Genres\", value=genres, inline=True)\n embed.add_field(name=\"Synonyms\", value=synonyms, inline=True)\n embed.set_footer(text=\"Bilgiler AniList'ten alınmıştır.\", icon_url=\"http://aizoule.com/piko-san/images/anilist.png\")\n await client.say(embed=embed)\n\n except IndexError:\n print(\"\")\n finally:\n not_found = json.dumps(search)\n not_json = json.loads(not_found)\n embed2 = discord.Embed(title=\"Aradığınız anime bulunamadı.\", colour=discord.Colour(0x2a9ff), description=\"Arattığınız anime olan \\\"{}\\\" AniList sitesinin veritabanında bulunamadı.\\nDoğru yazdığınıza emin misiniz?\".format(new_msg))\n embed2.set_author(name=ctx.message.author.name, icon_url=ctx.message.author.avatar_url)\n embed2.set_footer(text=\"Bilgiler AniList'ten alınmıştır.\", icon_url=\"http://aizoule.com/piko-san/images/anilist.png\")\n if not_json['data']['Page']['pageInfo']['lastPage'] == 0:\n await client.say(embed=embed2)\n \n@client.command(pass_context=True)\nasync def embedtest(ctx):\n author = ctx.message.author.name + \"#\" + ctx.message.author.discriminator\n embed = discord.Embed(title=\"Heres an embed example.\", description=\"LOREM IPSUM\")\n embed.set_author(name=author, icon_url=ctx.message.author.avatar_url)\n embed.set_footer(text=\"EMBED4LIFE\")\n await client.say(embed=embed)\n\n@client.command()\nasync def bitcoin():\n url = 'https://www.doviz.com/api/v1/coins/bitcoin/latest'\n r = rq.get(url).text\n rq_json = json.loads(r)\n selling = rq_json['selling']\n await client.say(\"Bitcoinin şu anki karşılığı: ₺\" + str(selling))\n\n@client.command()\nasync def altin():\n url = 'https://www.doviz.com/api/v1/golds/all/latest'\n r = rq.get(url).text\n rq_json = json.loads(r)\n ab = rq_json[0]['update_date']\n timestamp = datetime.datetime.fromtimestamp(int(ab)).strftime('%d-%m-%Y %H:%M:%S')\n gram_altin = str(rq_json[5]['selling'])\n gram_altin_alis = str(rq_json[5]['buying'])\n ceyrek_altin = str(rq_json[0]['selling'])\n ceyrek_altin_alis = str(rq_json[0]['buying'])\n yarim_altin = str(rq_json[1]['selling'])\n yarim_altin_alis = str(rq_json[1]['buying'])\n tam_altin = str(rq_json[2]['selling'])\n tam_altin_alis = str(rq_json[2]['buying'])\n cumhuriyet_altin = str(rq_json[3]['selling'])\n cumhuriyet_altin_alis = str(rq_json[3]['buying'])\n ata_altin = str(rq_json[6]['selling'])\n ata_altin_alis = str(rq_json[6]['buying'])\n embed = discord.Embed(colour=0xFFD700)\n embed.set_author(name=\"Altın Fiyatları\", icon_url=\"http://aizoule.com/piko-san/images/gold.png\")\n embed.add_field(name=\"Gram Altın Satış\", value=\"₺\" + gram_altin, inline=True)\n embed.add_field(name=\"Gram Altın Alış\", value=\"₺\" + gram_altin_alis, inline=True)\n embed.add_field(name=\"Çeyrek Altın Satış\", value=\"₺\" + ceyrek_altin, inline=True)\n embed.add_field(name=\"Çeyrek Altın Alış\", value=\"₺\" + ceyrek_altin_alis, inline=True)\n embed.add_field(name=\"Yarım Altın Satış\", value=\"₺\" + yarim_altin, inline=True)\n embed.add_field(name=\"Yarım Altın Alış\", value=\"₺\" + yarim_altin_alis, inline=True)\n embed.add_field(name=\"Tam Altın Satış\", value=\"₺\" + tam_altin, inline=True)\n embed.add_field(name=\"Tam Altın Alış\", value=\"₺\" + tam_altin_alis, inline=True)\n embed.add_field(name=\"Cumhuriyet Altın Satış\", value=\"₺\" + cumhuriyet_altin, inline=True)\n embed.add_field(name=\"Cumhuriyet Altın Alış\", value=\"₺\" + cumhuriyet_altin_alis, inline=True)\n embed.add_field(name=\"Ata Altın Satış\", value=\"₺\" + ata_altin, inline=True)\n embed.add_field(name=\"Ata Altın 
Alış\", value=\"₺\" + ata_altin_alis, inline=True)\n embed.set_footer(text=\"Bilgiler Doviz.com'dan alınmıştır. | Son güncellenme tarihi: \" + str(timestamp), icon_url=\"http://aizoule.com/piko-san/images/dovizcom.jpg\")\n await client.say(embed=embed)\n\n@client.command()\nasync def dolar():\n url = 'http://www.doviz.com/api/v1/currencies/USD/latest'\n r = rq.get(url).text\n rq_json = json.loads(r)\n dolar = rq_json['selling']\n dolar_alis = rq_json['buying']\n ab = rq_json['update_date']\n timestamp = datetime.datetime.fromtimestamp(int(ab)).strftime('%d-%m-%Y %H:%M:%S')\n embed = discord.Embed(color=0x85bb65)\n embed.set_author(name=\"Dolar Kuru\", icon_url=\"http://aizoule.com/piko-san/images/dolar.png\")\n embed.add_field(name=\"Dolar Satış\", value=\"₺\" + str(dolar), inline=True)\n embed.add_field(name=\"Dolar Alış\", value=\"₺\" + str(dolar_alis), inline=True)\n embed.set_footer(text=\"Bilgiler Doviz.com'dan alınmıştır. | Son güncellenme tarihi: \" + str(timestamp), icon_url=\"http://aizoule.com/piko-san/images/dovizcom.jpg\")\n await client.say(embed=embed)\n\n@client.command(name='euro')\nasync def euro():\n url = 'http://www.doviz.com/api/v1/currencies/EUR/latest'\n r = rq.get(url).text\n rq_json = json.loads(r)\n euro = rq_json['selling']\n euro_alis = rq_json['buying']\n ab = rq_json['update_date']\n colors = [\n 0x808080,\n 0xff0000,\n 0x0000ff,\n 0xffa500,\n 0x00ff00,\n 0xffff00,\n 0x800080,\n ]\n timestamp = datetime.datetime.fromtimestamp(int(ab)).strftime('%d-%m-%Y %H:%M:%S')\n embed = discord.Embed(color=random.choice(colors))\n embed.set_author(name=\"Euro Kuru\", icon_url=\"http://aizoule.com/piko-san/images/euro.png\")\n embed.add_field(name=\"Euro Satış\", value=\"₺\" + str(euro), inline=True)\n embed.add_field(name=\"Euro Alış\", value=\"₺\" + str(euro_alis), inline=True)\n embed.set_footer(text=\"Bilgiler Doviz.com'dan alınmıştır. 
| Son güncellenme tarihi: \" + str(timestamp), icon_url=\"http://aizoule.com/piko-san/images/dovizcom.jpg\")\n await client.say(embed=embed)\n\n@client.command(name='neko',\n description=\"Gives a random catgirl for you.\",\n brief=\"Answers from the beyond.\",\n pass_context=True)\nasync def neko(ctx):\n url = 'https://nekos.life/api/neko'\n r = rq.get(url).text\n r_json = json.loads(r)\n img = r_json['neko']\n colors = random.randint(0, 0xFFFFFF)\n embed = discord.Embed(title=\"Meow~\", color=colors)\n embed.set_image(url= img)\n embed.set_footer(text=\"I got this image from nekos.life\", icon_url=\"https://nekos.life/static/icons/favicon-194x194.png\")\n await client.say(embed=embed)\n \n@client.command(name='lewd',\n description=\"Gives a random catgirl for you with a special tag.\\n Tags you can use: 'feet', 'yuri', 'trap', 'futanari', 'hololewd', 'lewdkemo', 'solog', 'feetg', 'cum', 'erokemo', 'les', 'lewdk', 'lewd', 'gecg', 'eroyuri', 'eron', 'cum_jpg', 'bj', 'nsfw_neko_gif', 'solo', 'kemonomimi', 'nsfw_avatar', 'gasm', 'anal', 'hentai', 'avatar', 'erofeet', 'keta', 'blowjob', 'pussy', 'tits', 'holoero', 'pussy_jpg', 'pwankg', 'classic', 'kuni', 'femdom', 'spank', 'erok', 'boobs', 'Random_hentai_gif', 'smallboobs', 'ero'\\n Don't use with ''.\",\n brief=\"Answers from the beyond.\",\n pass_context=True)\nasync def lewd(ctx):\n if len(ctx.message.content) > 6:\n new_msg = ctx.message.content[6:]\n url = 'https://nekos.life/api/v2/img/{}'.format(new_msg)\n r = rq.get(url).text\n r_json = json.loads(r)\n img = r_json['url']\n colors = random.randint(0, 0xFFFFFF)\n embed = discord.Embed(title=\"Heres a lewdie {} image for you~\".format(new_msg), color=colors)\n embed.set_footer(text=\"I got this image from nekos.life\", icon_url=\"https://nekos.life/static/icons/favicon-194x194.png\")\n embed.set_image(url=img)\n await client.say(embed=embed)\n else:\n url2 = 'https://nekos.life/api/lewd/neko'\n r2 = rq.get(url2).text\n r_json2 = json.loads(r2)\n img2 = r_json2['neko']\n colors = random.randint(0, 0xFFFFFF)\n embed2 = discord.Embed(title=\"Lewdie~\", color=colors)\n embed2.set_image(url=img2)\n embed2.set_footer(text=\"I got this image from nekos.life\", icon_url=\"https://nekos.life/static/icons/favicon-194x194.png\")\n await client.say(embed=embed2)\n\n@client.command(name='nekov2',\n description=\"Gives a random catgirl for you.\",\n brief=\"Answers from the beyond.\",\n aliases=['nekogirl', 'catgirl'],\n pass_context=True)\nasync def nekov2(ctx):\n url = 'https://nekos.moe/api/v1/random/image?count=1&nsfw=false'\n r = rq.get(url).text\n r_json = json.loads(r)\n pic = 'https://nekos.moe/image/{}'.format(r_json['images'][0]['id'])\n colors = random.randint(0, 0xFFFFFF)\n virgul = \", \"\n t = r_json['images'][0]['tags']\n tags = virgul.join(t)\n likes = r_json['images'][0]['likes']\n favorites = r_json['images'][0]['favorites']\n embed = discord.Embed(title=\"Meow~\", color=colors)\n embed.set_image(url=pic)\n embed.add_field(name=\"Likes\", value=likes, inline=True)\n embed.add_field(name=\"Favorites\", value=favorites, inline=True)\n embed.add_field(name=\"Tags\", value=tags, inline=False)\n embed.set_footer(text=\"I get this image from nekos.moe\", icon_url=\"https://nekos.moe/static/favicon/apple-touch-icon.png\")\n await client.say(embed=embed)\n\n@client.command(name='lewdv2',\n description=\"Gives a random lewd catgirl for you.\",\n brief=\"Answers from the beyond.\",\n pass_context=True)\nasync def lewdv2(ctx):\n url = 
'https://nekos.moe/api/v1/random/image?count=1&nsfw=true'\n r = rq.get(url).text\n r_json = json.loads(r)\n pic = 'https://nekos.moe/image/{}'.format(r_json['images'][0]['id'])\n colors = [\n 0xA11212,\n 0xAA5656,\n 0xa31870,\n 0xe059ae,\n 0xbc12b7,\n 0x825480,\n 0xb73119,\n 0xc8d815,\n 0x31bdd6,\n ]\n virgul = \", \"\n t = r_json['images'][0]['tags']\n tags = virgul.join(t)\n likes = r_json['images'][0]['likes']\n favorites = r_json['images'][0]['favorites']\n embed = discord.Embed(title=\"Lewdie~\", color=random.choice(colors))\n embed.set_image(url=pic)\n embed.add_field(name= \"Likes\", value=likes, inline=True)\n embed.add_field(name= \"Favorites\", value=favorites, inline=True)\n embed.add_field(name=\"Tags\", value=tags, inline=False)\n embed.set_footer(text=\"I get this image from nekos.moe\", icon_url=\"https://nekos.moe/static/favicon/apple-touch-icon.png\")\n await client.say(embed=embed)\n \n\n@client.command(pass_context=True)\nasync def makeplaylist(ctx):\n\turl = 'http://www.playlist-converter.net/#/freetext='\n\ttracklist = ctx.message.content[14::]\n\ttracks = tracklist\n\tencoded = urllib.parse.quote(tracks)\n\tfinal = url+encoded\n\tawait client.say(\"Use the URL below and click a platform for exporting your playlist to that platform.\\n\" + str(final))\n\n\nclass Ping:\n\n @client.command(pass_context=True)\n async def ping(ctx):\n channel = ctx.message.channel\n t1 = time.perf_counter()\n await client.send_typing(channel)\n t2 = time.perf_counter()\n embed = discord.Embed(title=\"Pong! :ping_pong:\", description=\"**Ping süresi: {}ms**\".format(round((t2 - t1) * 1000)), color=0x3aff4c)\n await client.say(embed=embed)\n\n@client.event\nasync def on_ready():\n await client.change_presence(game=Game(name=\"with humans\"))\n print(\"Logged in as \" + client.user.name)\n\nasync def list_servers():\n await client.wait_until_ready()\n while not client.is_closed:\n print(\"Current servers:\")\n for server in client.servers:\n print(server.name)\n await asyncio.sleep(600)\n\n\nclient.loop.create_task(list_servers())\nclient.run(TOKEN)\n" } ]
1
soumalipal/BTP
https://github.com/soumalipal/BTP
1f2ae1067319367fa029eb263c4b581c2713f9c8
376c7cdbda589ef1cf066e82f2a152051e4f2372
bc6d580c3a28f0c19e12176db7b4fac31848f6ea
refs/heads/master
2020-04-26T09:12:04.787029
2019-03-01T15:17:02
2019-03-01T15:17:02
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5704125165939331, "alphanum_fraction": 0.5903271436691284, "avg_line_length": 26.038461685180664, "blob_id": "72428d715dd946b67bf470406bc189066afe709d", "content_id": "e8821121fc75991102bf2d9fe59b54083f2edeb3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 703, "license_type": "no_license", "max_line_length": 69, "num_lines": 26, "path": "/src/basefuncs.py", "repo_name": "soumalipal/BTP", "src_encoding": "UTF-8", "text": "import serial\nimport os\nimport time\nfrom src.arduino import Arduino\n\ndef handshake(devlist):\n ardlist=devlist\n devices=os.listdir(\"/dev\")\n for dev in devices:\n if dev[0:6]==\"ttyUSB\" or dev[0:6]==\"ttyACM\":\n arduino=serial.Serial(\"/dev/\" + str(dev),9600,timeout=10)\n time.sleep(0.1)\n arduino.flushInput()\n id=int(arduino.readline().decode().strip().strip('\\x00'))\n print(id)\n devlist[id]=arduino\n arduino.write(b'x')\n ardlist[id]=Arduino(arduino)\n return ardlist\n \n\n\ndef shutdown(ardlist):\n for arduino in ardlist:\n arduino.device.setDTR(False)\n arduino.device.setDTR(True)\n" }, { "alpha_fraction": 0.7095237970352173, "alphanum_fraction": 0.7095237970352173, "avg_line_length": 20.03333282470703, "blob_id": "13ee71d23eaf366abec2a97ff3ff825834c39310", "content_id": "c025f23ef0a41e12b9a0e6902f4796934cf6c9f3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 630, "license_type": "no_license", "max_line_length": 60, "num_lines": 30, "path": "/main.py", "repo_name": "soumalipal/BTP", "src_encoding": "UTF-8", "text": "import tkinter as tk\nimport time\nfrom threading import Thread,Timer\n\nfrom src.interface import Interface\nfrom src.basefuncs import handshake,shutdown\nfrom src.serialcom import Serialcom\n\n\ndef main():\n devlist=[None,None,None,None]\n ardlist=handshake(devlist)\n\n serialcom=Serialcom(ardlist)\n serialcom.startreadingserial()\n #data_thread=Thread(target=serialcom.startreadingserial)\n #data_thread.start()\n\n\n root=tk.Tk()\n root.attributes('-zoomed', True)\n root.configure(bg='white') \n\n interf=Interface(root,ardlist)\n interf.guiloop()\n \n shutdown(ardlist)\n\nif __name__ == \"__main__\":\n main()" }, { "alpha_fraction": 0.6181297898292542, "alphanum_fraction": 0.6431297659873962, "avg_line_length": 26.87765884399414, "blob_id": "8e5245dcebd5cadbdeaa80b6aab19e6bfad12ecc", "content_id": "ccad1df251f2de864ec1536f0996c6563cb6665c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5240, "license_type": "no_license", "max_line_length": 171, "num_lines": 188, "path": "/temp.py", "repo_name": "soumalipal/BTP", "src_encoding": "UTF-8", "text": "import tkinter as tk\nimport serial\nimport time\nimport os\nfrom PIL import ImageTk\n#import RPi.GPIO as GPIO \n \n#Function Definitions\n \n#Find Connected Arduinos\n \ndef find_dev():\n global left_ar,right_ar,rpm_ar,cabin_ar,back_ar\n devices=os.listdir(\"/dev\")\n for dev in devices:\n if dev[0:6]==\"ttyUSB\" or dev[0:6]==\"ttyACM\":\n arduino=serial.Serial(\"/dev/\" + str(dev),9600)\n assign=arduino.readline()\n assign=arduino.readline()\n \n para=str(assign).split() \n #print(para)\n if(para[0][2:]==\"b'Distancelf:\"):\n left_ar=arduino\n elif para[0]==\"b'Distancerf:\":\n right_ar=arduino\n elif para[0]==\"b'RPM:\":\n rpm_ar=arduino\n elif para[0]==\"b'cabin\":\n cabin_ar=arduino\n elif para[0]==\"b'Distancebl\":\n back_ar=arduino\n \n \n#Update Ultrasonic Sensor Labels-Left \n \ndef 
update_left():\n global left_ar\n data=left_ar.readline()\n data_split=str(data).split()\n label_lf.config(text=data_split[1])\n label_lb.config(text=data_split[3])\n if int(data_split[1])<50:\n label_lf.place(x=((screen_width-picture_width)/2)+50,y=((screen_height-picture_height)/2)+250)\n else:\n label_lf.place(x=screen_width+1,y=screen_height+1)\n if int(data_split[3])<50:\n label_lb.place(x=((screen_width-picture_width)/2)+50,y=((screen_height+picture_height)/2)-250)\n else:\n label_lb.place(x=screen_width+1,y=screen_height+1)\n \n#Update Ultrasonic Sensor Labels-Right\n \ndef update_right():\n global right_ar\n data=right_ar.readline()\n data_split=str(data).split()\n label_rf.config(text=data_split[1])\n label_rb.config(text=data_split[3])\n if int(data_split[1])<50:\n label_rf.place(x=((screen_width+picture_width)/2)+20,y=((screen_height-picture_height)/2)+30)\n else:\n label_rf.place(x=screen_width+1,y=screen_height+1)\n if int(data_split[3])<50:\n label_rb.place(x=((screen_width+picture_width)/2)+20,y=((screen_height+picture_height)/2)-30)\n else:\n label_rb.place(x=screen_width+1,y=screen_height+1)\n \n#Update Ultrasonic Sensor Labels-Back\n \ndef update_back():\n global back_ar\n data=back_ar.readline()\n data_split=str(data).split()\n label_bl.config(text=data_split[1])\n label_br.config(text=data_split[3])\n if int(data_split[1])<50:\n label_bl.place(x=((screen_width-picture_width)/2)+20,y=((screen_height+picture_height)/2)+30)\n else:\n label_bl.place(x=screen_width+1,y=screen_height+1)\n if int(data_split[3])<50:\n label_br.place(x=((screen_width+picture_width)/2)-20,y=((screen_height+picture_height)/2)+30)\n else:\n label_br.place(x=screen_width+1,y=screen_height+1)\n \n#Update Time (minutes are taken modulo one hour so the label reads correctly)\n \ndef update_time():\n global startTime\n label_Time.config(text=\"Time of Operation \" + str(int((time.time()-startTime)/3600))+\" Hours \"+str(int(((time.time()-startTime)%3600)/60))+\" Minutes\",fg=\"black\",bg=\"yellow\") \n label_Time.place(x=0,y=0)\n \n#Relay Control Button Toggle \n \ndef relay_control():\n global flag\n if flag==0:\n #GPIO.output(40, GPIO.HIGH)\n relay_button.config(text=\"Turn Off\",command=relay_control)\n flag=1\n elif flag==1:\n #GPIO.output(40, GPIO.LOW)\n relay_button.config(text=\"Turn On\",command=relay_control)\n flag=0\n \n#Update\n \ndef update():\n update_left()\n #update_right()\n #update_back()\n #update_rpm()\n update_time()\n #Recursion for each update\n root.after(1000, update)\n \n \n \n#Initialize Time\n \nstartTime=time.time()\n \n#Initialize RPi for GPIO\n \n##GPIO.setmode(GPIO.BOARD) \n##GPIO.setup(40,GPIO.OUT,initial=GPIO.LOW) \nflag = 0 #for Relay Control\n \n#Tkinter for GUI\n \nroot=tk.Tk()\n \n#Fullscreen\n \nroot.attributes('-zoomed', True)\nroot.wm_attributes('-alpha', 0.7) \nroot.configure(bg='white')\n \n#Getting Screen Resolution\n \nscreen_width = root.winfo_screenwidth()\nscreen_height = root.winfo_screenheight()\n \n#Center Image\nphoto_vehicle=ImageTk.PhotoImage(file=\"/home/ritwick/Documents/Arduino/US/car.png\")\ncenter_img=tk.Label(root,image=photo_vehicle,relief='flat',bg='white')\n\npicture_height=photo_vehicle.height()\npicture_width=photo_vehicle.width()\n \ncenter_img.place(x=(screen_width-picture_width)/2,y=(screen_height-picture_height)/2)\n \n#Labels for Ultrasonic Sensor\n \nlabel_lf= tk.Label(root,text=\"lf\")\nlabel_lf.config(bg='white',font=(\"Courier\", 32),fg='black')\nlabel_lb= tk.Label(root,text=\"lb\")\nlabel_lb.config(bg='white',font=(\"Courier\", 32),fg='black')\nlabel_rf= tk.Label(root,text=\"rf\")\nlabel_rb= 
tk.Label(root,text=\"rb\")\nlabel_bl= tk.Label(root,text=\"bl\")\nlabel_br= tk.Label(root,text=\"br\")\n \n#Label for RPM Sensor\n \nlabel_rpm=tk.Label(root,text=\"RPM\",bg=\"yellow\",fg=\"black\")\nlabel_rpm.place(x=(screen_width/2)-10,y=(screen_height/2)-10)\n \n#Label for Time\n \nlabel_Time=tk.Label(root,text=startTime)\n \n#Button to Control Relay\n \nrelay_button=tk.Button(root,text=\"Turn On\",command=relay_control)\nrelay_button.place(x=(screen_width/2)-10,y=((screen_height+picture_height)/2)+60)\n \n#Find and Assign Connected Arduinos\n \nfind_dev()\n \n#Update Each Component\n \nupdate()\n \n#Mainloop\n \nroot.mainloop()" }, { "alpha_fraction": 0.6307692527770996, "alphanum_fraction": 0.6307692527770996, "avg_line_length": 21.565217971801758, "blob_id": "08d737090958dcf60d2e2b0d15c8f518e182ec81", "content_id": "66d92c33ac544849aa128ad8eb997110245dee17", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 520, "license_type": "no_license", "max_line_length": 55, "num_lines": 23, "path": "/src/serialcom.py", "repo_name": "soumalipal/BTP", "src_encoding": "UTF-8", "text": "import tkinter as tk\nimport serial\nimport time\nimport os\nfrom threading import Thread,Timer\nfrom PIL import ImageTk\nfrom src.arduino import Arduino\nfrom queue import Queue\n\nclass Serialcom():\n def __init__(self,ardlist):\n self.ardlist=ardlist\n \n \n def startreadingserial(self):\n for ard in self.ardlist:\n #q=self.mainbuf[i]\n t=Thread(target=self.ardserial,args=(ard,))\n t.start()\n\n def ardserial(self,ard):\n while True:\n ard.getdata()\n\n" }, { "alpha_fraction": 0.517320990562439, "alphanum_fraction": 0.5496535897254944, "avg_line_length": 14.464285850524902, "blob_id": "9d06947ce3f789f4f6d0ba2573927b5877420f4d", "content_id": "fcdb171749e24721fff15a67bbe01f80de26b313", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 433, "license_type": "no_license", "max_line_length": 56, "num_lines": 28, "path": "/arc/ar_1/ar_1.ino", "repo_name": "soumalipal/BTP", "src_encoding": "UTF-8", "text": "int x=5;\nvoid setup() {\n\n pinMode(LED_BUILTIN, OUTPUT);\n\n Serial.begin(9600); // Starts the serial communication\n while(true)\n {\n if (Serial.available() > 0) {\n char c = Serial.read();\n if(c=='x') break;\n }\n Serial.print(\"1\");\n Serial.println();\n \n delay(100); \n }\n \n}\nvoid loop() {\n digitalWrite(LED_BUILTIN, HIGH); \n\n Serial.print(\"psrpm:\");\n Serial.println(x);\n x++;\n \n delay(1000);\n}\n" }, { "alpha_fraction": 0.5099818706512451, "alphanum_fraction": 0.5372051000595093, "avg_line_length": 16.21875, "blob_id": "13a52fdadc30428fa310eb0501dc6b87e0f13f0b", "content_id": "ebd0bc9bcc2790d95e1aab6a020fe14985da5b96", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 551, "license_type": "no_license", "max_line_length": 56, "num_lines": 32, "path": "/arc/ar_0/ar_0.ino", "repo_name": "soumalipal/BTP", "src_encoding": "UTF-8", "text": "int trigPin=13;\n\nvoid setup() {\n\n pinMode(trigPin, OUTPUT);\n\n Serial.begin(9600); // Starts the serial communication\n while(true)\n {\n if (Serial.available() > 0) {\n char c = Serial.read();\n if(c=='x') break;\n }\n Serial.print(\"0\");\n Serial.println();\n \n delay(100); \n }\n \n}\nvoid loop() {\n\n if (Serial.available() > 0) {\n char c = Serial.read();\n if(c=='o') digitalWrite(trigPin, HIGH); \n else if(c=='x') digitalWrite(trigPin, LOW); \n }\n \n Serial.println(\"relay:test\");\n \n 
delay(100);\n}\n" }, { "alpha_fraction": 0.595083475112915, "alphanum_fraction": 0.6182745695114136, "avg_line_length": 37.85585403442383, "blob_id": "94444420d72d81641ca73b9a29d3f201c4f17b2d", "content_id": "1469f2817bcb5e2a913de2be7e129b6944f9dc03", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4312, "license_type": "no_license", "max_line_length": 219, "num_lines": 111, "path": "/src/interface.py", "repo_name": "soumalipal/BTP", "src_encoding": "UTF-8", "text": "import tkinter as tk\nimport serial\nimport time\nimport os\nfrom queue import Queue\nfrom PIL import ImageTk\nfrom src.arduino import Arduino\n\nclass Interface:\n def __init__(self,root,ardlist):\n #self.ardlist=ardlist\n self.root=root\n self.screen_width = root.winfo_screenwidth()\n self.screen_height = root.winfo_screenheight()\n self.car_running=False\n\n self.ar_0=ardlist[0]\n self.ar_1=ardlist[1]\n self.ar_2=ardlist[2]\n self.ar_3=ardlist[3]\n \n \n def guiloop(self):\n \n self.text_usfl=tk.StringVar(self.root)\n self.text_usfr=tk.StringVar(self.root)\n self.text_usbl=tk.StringVar(self.root)\n self.text_usbr=tk.StringVar(self.root)\n self.text_time=tk.StringVar(self.root)\n self.text_rpm=tk.StringVar(self.root)\n \n\n photo_vehicle=ImageTk.PhotoImage(file=os.path.dirname(os.path.realpath(__file__))+\"/img_res/car.png\")\n center_img=tk.Label(self.root,image=photo_vehicle,relief='flat',bg='white')\n picture_height=photo_vehicle.height()\n picture_width=photo_vehicle.width() \n center_img.place(x=(self.screen_width-picture_width)/2,y=(self.screen_height-picture_height)/2) \n \n self.main_frame=tk.Frame(self.root,bg='')\n \n label_usfl=tk.Label(self.main_frame,textvariable=self.text_usfl,bg='white',font=(\"Courier\", 24),fg='black',anchor=\"center\")\n label_usfl.place(x=((self.screen_width-picture_width)/2),y=((self.screen_height-picture_height)/2)+250)\n\n\n label_usfr=tk.Label(self.main_frame,textvariable=self.text_usfr,bg='white',font=(\"Courier\", 24),fg='black')\n label_usfr.place(x=((self.screen_width-picture_width)/2)+490,y=((self.screen_height-picture_height)/2)+250)\n\n label_usbl=tk.Label(self.main_frame,textvariable=self.text_usbl,bg='white',font=(\"Courier\", 24),fg='black')\n label_usbl.place(x=((self.screen_width-picture_width)/2),y=((self.screen_height-picture_height)/2)+750)\n\n label_usbr=tk.Label(self.main_frame,textvariable=self.text_usbr,bg='white',font=(\"Courier\", 24),fg='black')\n label_usbr.place(x=((self.screen_width-picture_width)/2)+490,y=((self.screen_height-picture_height)/2)+750)\n\n label_rpm=tk.Label(self.main_frame,textvariable=self.text_rpm,bg='white',font=(\"Courier\", 32),fg='black')\n label_rpm.place(x=(self.screen_width/2)-150,y=100)\n \n label_Time=tk.Label(self.root,textvariable=self.text_time,bg='white',font=(\"Courier\", 28),fg='black')\n label_Time.place(x=5,y=5)\n \n self.button_relay=tk.Button(self.root,height=2,width=10,text=\"Start Car\",command=self.toggle_car,bg='#009688',font=(\"Courier\", 42),activeforeground='white',fg='white',bd=0,justify='center', highlightthickness=0)\n self.button_relay.place(x=self.screen_width-360,y=0)\n\n self.update_time()\n self.root.mainloop()\n\n\n def toggle_car(self):\n if self.car_running:\n self.ar_0.senddata('x')\n self.button_relay.config(text=\"Start Car\",bg='#009688')\n self.car_running=False\n self.main_frame.pack_forget()\n\n else:\n \"\"\"for q in self.mainbuf:\n with q.mutex:\n q.queue.clear()\"\"\"\n self.ar_0.senddata('o')\n self.button_relay.config(text=\"Stop 
Car\",bg='#ff6347')\n self.car_running=True\n self.main_frame.pack(fill='both',expand=True)\n self.updateall()\n\n def updateall(self):\n self.update_ar_1()\n self.update_ar_2()\n self.update_ar_3()\n if(self.car_running):\n self.root.after(10,self.updateall)\n\n def update_ar_1(self):\n sensdata=self.ar_1.getcurr_data()\n self.text_rpm.set(sensdata['psrpm']+\" rpm\")\n\n def update_ar_2(self):\n sensdata=self.ar_2.getcurr_data()\n\n self.text_usfl.set(sensdata['usfld'])\n self.text_usfr.set(sensdata['usfrd'])\n \n \n def update_ar_3(self):\n sensdata=self.ar_3.getcurr_data()\n\n self.text_usbl.set(sensdata['usbld'])\n self.text_usbr.set(sensdata['usbrd'])\n \n \n def update_time(self):\n self.text_time.set(time.strftime(\"%I:%M %p\",time.localtime()))\n self.root.after(100,self.update_time)" }, { "alpha_fraction": 0.5452091693878174, "alphanum_fraction": 0.5600540041923523, "avg_line_length": 22.935483932495117, "blob_id": "7d4070825e15d5c2a01f45d2e404611657db026b", "content_id": "882ff132316f64c409b5fae4e5db8703bf8ba8a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 741, "license_type": "no_license", "max_line_length": 66, "num_lines": 31, "path": "/src/arduino.py", "repo_name": "soumalipal/BTP", "src_encoding": "UTF-8", "text": "import tkinter as tk\nimport serial\nimport time\nimport os\nfrom threading import Thread,Timer\nfrom PIL import ImageTk\n\nclass Arduino:\n def __init__(self,device):\n self.device=device\n\n def getdata(self):\n #self.device.reset_input_buffer()\n #time.sleep(0.01)\n final_data={}\n data=self.device.readline().decode().strip().strip('\\x00')\n print(data)\n sensdata=data.split(\"|\")\n for s in sensdata:\n t=s.split(\":\")\n t[1]=\" \"*(5-len(t[1]))+t[1]\n final_data[t[0]]=t[1]\n \n self.curr_data=final_data\n \n \n def getcurr_data(self):\n return self.curr_data\n\n def senddata(self,data):\n self.device.write(data.encode())" }, { "alpha_fraction": 0.6403384804725647, "alphanum_fraction": 0.6706628799438477, "avg_line_length": 18.424657821655273, "blob_id": "3db9be8f8ff14c607e4bb7652ee6cceb75e6d0c7", "content_id": "dcdf93bd8edf5f38c99b87c5bd6563125193c282", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1418, "license_type": "no_license", "max_line_length": 62, "num_lines": 73, "path": "/arc/ar_2/ar_2.ino", "repo_name": "soumalipal/BTP", "src_encoding": "UTF-8", "text": "const int trigPinfl = 12;\nconst int echoPinfl = 11;\n\n\nconst int trigPinfr = 10;\nconst int echoPinfr = 9;\n\nlong durationfl;\nint distancefl;\n\n\nlong durationfr;\nint distancefr;\n\nint x=-1000,y=-5000;\n\nvoid setup() {\n pinMode(trigPinfl, OUTPUT); // Sets the trigPin as an Output\n pinMode(echoPinfl, INPUT); // Sets the echoPin as an Input\n\n\n pinMode(trigPinfr, OUTPUT); // Sets the trigPin as an Output\n pinMode(echoPinfr, INPUT); // Sets the echoPin as an Input*/\n\n pinMode(LED_BUILTIN, OUTPUT);\n\n Serial.begin(9600); // Starts the serial communication\n while(true)\n {\n if (Serial.available() > 0) {\n char c = Serial.read();\n if(c=='x') break;\n }\n Serial.print(\"2\");\n Serial.println();\n\n delay(100); \n }\n}\nvoid loop() {\n\n digitalWrite(LED_BUILTIN, HIGH); \n\n digitalWrite(trigPinfl, LOW);\n delayMicroseconds(5);\n digitalWrite(trigPinfl, HIGH);\n delayMicroseconds(10);\n digitalWrite(trigPinfl, LOW);\n durationfl = pulseIn(echoPinfl, HIGH);\n\n\n digitalWrite(trigPinfr, LOW);\n delayMicroseconds(5);\n digitalWrite(trigPinfr, HIGH);\n 
delayMicroseconds(10);\n digitalWrite(trigPinfr, LOW);\n durationfr = pulseIn(echoPinfr, HIGH);\n\n\n distancefl = durationfl * 0.034 / 2;\n distancefr = durationfr * 0.034 / 2;\n\n Serial.print(\"usfld:\");\n Serial.print(distancefl);\n Serial.print(\"|\");\n\n Serial.print(\"usfrd:\");\n Serial.print(distancefr);\n Serial.println();\n\n\n delay(100);\n}\n" }, { "alpha_fraction": 0.6349650621414185, "alphanum_fraction": 0.6650349497795105, "avg_line_length": 18.053333282470703, "blob_id": "e848939ba30e268eb4761a395243c0b520e479e9", "content_id": "a053fc4c98689b73404250bd8c6352902c6b1872", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1430, "license_type": "no_license", "max_line_length": 62, "num_lines": 75, "path": "/arc/ar_3/ar_3.ino", "repo_name": "soumalipal/BTP", "src_encoding": "UTF-8", "text": "\nconst int trigPinbl = 12;\nconst int echoPinbl = 11;\n\n\nconst int trigPinbr = 10;\nconst int echoPinbr = 9;\n\nlong durationbl;\nint distancebl;\n\n\nlong durationbr;\nint distancebr;\n\n int x=-2000,y=-6000;\n\nvoid setup() {\n pinMode(trigPinbl, OUTPUT); // Sets the trigPin as an Output\n pinMode(echoPinbl, INPUT); // Sets the echoPin as an Input\n\n\n pinMode(trigPinbr, OUTPUT); // Sets the trigPin as an Output\n pinMode(echoPinbr, INPUT); // Sets the echoPin as an Input\n\n pinMode(LED_BUILTIN, OUTPUT);\n\n Serial.begin(9600); // Starts the serial communication\n while(true)\n {\n if (Serial.available() > 0) {\n char c = Serial.read();\n if(c=='x') break;\n }\n Serial.print(\"3\");\n Serial.println();\n \n delay(100); \n }\n \n}\nvoid loop() {\n\n digitalWrite(LED_BUILTIN, HIGH); \n\n digitalWrite(trigPinbl, LOW);\n delayMicroseconds(5);\n digitalWrite(trigPinbl, HIGH);\n delayMicroseconds(10);\n digitalWrite(trigPinbl, LOW);\n durationbl = pulseIn(echoPinbl, HIGH);\n\n\n digitalWrite(trigPinbr, LOW);\n delayMicroseconds(5);\n digitalWrite(trigPinbr, HIGH);\n delayMicroseconds(10);\n digitalWrite(trigPinbr, LOW);\n durationbr = pulseIn(echoPinbr, HIGH);\n\n\n distancebl = durationbl * 0.034 / 2;\n distancebr = durationbr * 0.034 / 2;\n\n\n Serial.print(\"usbld:\");\n Serial.print(distancebl);\n Serial.print(\"|\");\n\n Serial.print(\"usbrd:\");\n Serial.print(distancebr);\n Serial.println(); \n \n\n delay(100);\n}\n" }, { "alpha_fraction": 0.7976878881454468, "alphanum_fraction": 0.7976878881454468, "avg_line_length": 28, "blob_id": "14c6f9c8b5a10e7ab88a3995c9ddf33bd3f10719", "content_id": "0d3ba74e2f859752907c3d81c1bd0691e56b671d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 173, "license_type": "no_license", "max_line_length": 75, "num_lines": 6, "path": "/README.md", "repo_name": "soumalipal/BTP", "src_encoding": "UTF-8", "text": "# BTP\nThis repo contains code for solareon which is also my B.Tech project.\n\nAssumptions were made regarding my capabilities to procure the said degree.\n\nYou've been warned." } ]
11
goodpupil/TimeSeries_Classification_with_RandomForest
https://github.com/goodpupil/TimeSeries_Classification_with_RandomForest
af05526e24ea8d7af2ec8799455dc32fda7022de
9f31bc08ca8eb96ea93535b98ef52a383199cd09
aab04c79239deaa4920a0dd203f1b5bb09bce723
refs/heads/master
2022-04-08T10:50:16.618177
2020-03-05T13:38:36
2020-03-05T13:38:36
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8439716100692749, "alphanum_fraction": 0.8439716100692749, "avg_line_length": 69.5, "blob_id": "275ce81da226560bbfc4599707f6c1111ca99920", "content_id": "e47d5d1750e8c23dd2b1fd046e97857b51f363c3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 141, "license_type": "no_license", "max_line_length": 94, "num_lines": 2, "path": "/README.md", "repo_name": "goodpupil/TimeSeries_Classification_with_RandomForest", "src_encoding": "UTF-8", "text": "# TimeSeries Classification with RandomForest\nA Time Series classification demo using a RandomForest model on a dataset shuffled five times.\n" }, { "alpha_fraction": 0.6933430433273315, "alphanum_fraction": 0.7078937888145447, "avg_line_length": 42.63492202758789, "blob_id": "9af861dc4b16bc1a2534ede5348b02e6ac5126d0", "content_id": "5b8507ce91932f363e4911cb6b7c241c7cc3b70b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2749, "license_type": "no_license", "max_line_length": 124, "num_lines": 63, "path": "/main_rf.py", "repo_name": "goodpupil/TimeSeries_Classification_with_RandomForest", "src_encoding": "UTF-8", "text": "import numpy as np\nimport os\nimport constants\nfrom sklearn.model_selection import GridSearchCV, PredefinedSplit\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn import metrics\nfrom sklearn.externals import joblib\n\n\"\"\"\n Training and evaluating Random Forest classification metrics using a shuffled dataset in 5 folds of data\n\"\"\"\nN_FOLDS = 5\nfor i in range(N_FOLDS):\n\n current_fold = str(i + 1)\n train_fn = '[YOUR TRAIN DATA FILE PATH HERE]'\n validation_fn = '[YOUR VALIDATION DATA FILE PATH HERE]'\n test_fn = '[YOUR TEST DATA FILE PATH HERE]'\n target_train_fn = '[YOUR TRAIN LABELS FILE PATH HERE]'\n target_validation_fn = '[YOUR VALIDATION LABELS FILE PATH HERE]'\n target_test_fn = '[YOUR TEST LABELS FILE PATH HERE]'\n\n # loading the data already splitted\n x_train = np.load(train_fn)\n x_validation = np.load(validation_fn)\n x_test = np.load(test_fn)\n y_train = np.argmax(np.load(target_train_fn),axis=1)\n y_validation = np.argmax(np.load(target_validation_fn),axis=1)\n y_test = np.argmax(np.load(target_test_fn), axis=1)\n\n # parameter definition for tuning\n tuned_parameters = {'n_estimators': [200,300,400,500],\n 'max_depth': [None, 20,40,60,80,100]}\n\n # model creation\n clf = RandomForestClassifier()\n\n # merging train and validation test for the optimization step\n x_train_validation = np.concatenate((x_train,x_validation),axis=0)\n y_train_validation = np.concatenate((y_train, y_validation), axis=0)\n\n # split_index contains -1 indicating the sample is for training; otherwise 0 for validation\n split_index = [-1 if x in range(len(x_train)) else 0 for x in range(len(x_train_validation))]\n\n # using the split_index list to define the split criteria for the cross-validation process during the model optimization\n ps = PredefinedSplit(test_fold = split_index)\n\n # Optimization step. 
Setting n_jobs = -1 enables parallel execution on all processors\n grid_search = GridSearchCV(estimator = clf, param_grid = tuned_parameters, cv = ps, n_jobs =-1, verbose = 2)\n # training of the random forest classifier\n grid_search.fit(x_train_validation, y_train_validation)\n # test predictions\n y_pred = grid_search.predict(x_test)\n print(\"Metrics fold \", current_fold)\n print(\"Accuracy: \", metrics.accuracy_score(y_test, y_pred))\n print(\"F-score: \", metrics.f1_score(y_test, y_pred, average='macro'))\n print(\"K-score: \", metrics.cohen_kappa_score(y_test, y_pred))\n\n # best model saving in a single file\n outputFolder = constants.OUTPUT_DIR_PATH + current_fold\n if not os.path.exists(outputFolder):\n os.makedirs(outputFolder)\n joblib.dump(grid_search.best_estimator_, outputFolder+'/best_model.pkl', compress=1)\n" } ]
2
ramonsalau/BookStore-Management-System
https://github.com/ramonsalau/BookStore-Management-System
29bb9e13d47ca6887dfd7e9e35ccc2e16254574b
2e6f25d5ca3c6e0312369e01ba6c4aa7491584f0
870df0a37f1f4e90d61df97e3aa1c81c6bdb7d13
refs/heads/master
2020-03-28T02:11:30.744384
2018-09-05T17:18:36
2018-09-05T17:18:36
147,553,794
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7558139562606812, "alphanum_fraction": 0.7848837375640869, "avg_line_length": 85, "blob_id": "67fc94a420b6e905a2a454ab824e451210a85776", "content_id": "67fd6f6bbd0772f289b41031b7690cd93ab4396e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 172, "license_type": "no_license", "max_line_length": 141, "num_lines": 2, "path": "/README.md", "repo_name": "ramonsalau/BookStore-Management-System", "src_encoding": "UTF-8", "text": "# BookStore-Management-System\nThis program is basic program written in python 2.7 to manage books in a bookstore. PyQt4 for python 2.7 has to be installed to run the GUI.\n" }, { "alpha_fraction": 0.6130995750427246, "alphanum_fraction": 0.6442544460296631, "avg_line_length": 42.48387145996094, "blob_id": "6a2e5c30ec9f807938f2aa44e4f1f1645dfdce2c", "content_id": "81d4cbfa56b038b4b04a2eef7014f5a8c9a1d486", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6901, "license_type": "no_license", "max_line_length": 157, "num_lines": 155, "path": "/BookStoreManagement.py", "repo_name": "ramonsalau/BookStore-Management-System", "src_encoding": "UTF-8", "text": "import sqlite3\r\nfrom PyQt4 import QtCore, QtGui\r\n\r\n#Create and connect database\r\nconn = sqlite3.connect(\"books.db\")\r\nc = conn.cursor()\r\n\r\n\r\ntry:\r\n _fromUtf8 = QtCore.QString.fromUtf8\r\nexcept AttributeError:\r\n def _fromUtf8(s):\r\n return s\r\n\r\ntry:\r\n _encoding = QtGui.QApplication.UnicodeUTF8\r\n def _translate(context, text, disambig):\r\n return QtGui.QApplication.translate(context, text, disambig, _encoding)\r\nexcept AttributeError:\r\n def _translate(context, text, disambig):\r\n return QtGui.QApplication.translate(context, text, disambig)\r\n\r\nclass Ui_Form(object):\r\n def setupUi(self, Form):\r\n Form.setObjectName(_fromUtf8(\"Form\"))\r\n Form.resize(830, 673)\r\n self.Main_Frame = QtGui.QFrame(Form)\r\n self.Main_Frame.setEnabled(True)\r\n self.Main_Frame.setGeometry(QtCore.QRect(10, 20, 801, 581))\r\n self.Main_Frame.setFrameShape(QtGui.QFrame.NoFrame)\r\n self.Main_Frame.setFrameShadow(QtGui.QFrame.Raised)\r\n self.Main_Frame.setObjectName(_fromUtf8(\"Main_Frame\"))\r\n self.table = QtGui.QTableWidget(self.Main_Frame)\r\n self.table.setGeometry(QtCore.QRect(0, 0, 541, 581))\r\n self.table.setObjectName(_fromUtf8(\"table\"))\r\n self.table.setColumnCount(4)\r\n self.table.setRowCount(0)\r\n item = QtGui.QTableWidgetItem()\r\n self.table.setHorizontalHeaderItem(0, item)\r\n item = QtGui.QTableWidgetItem()\r\n self.table.setHorizontalHeaderItem(1, item)\r\n item = QtGui.QTableWidgetItem()\r\n self.table.setHorizontalHeaderItem(2, item)\r\n item = QtGui.QTableWidgetItem()\r\n self.table.setHorizontalHeaderItem(3, item)\r\n self.add_button = QtGui.QPushButton(self.Main_Frame)\r\n self.add_button.setGeometry(QtCore. 
QRect(640, 230, 75, 23))\r\n self.add_button.setObjectName(_fromUtf8(\"add_button\"))\r\n self.label_2 = QtGui.QLabel(self.Main_Frame)\r\n self.label_2.setGeometry(QtCore.QRect(580, 50, 61, 21))\r\n font = QtGui.QFont()\r\n font.setPointSize(10)\r\n self.label_2.setFont(font)\r\n self.label_2.setObjectName(_fromUtf8(\"label_2\"))\r\n self.label_3 = QtGui.QLabel(self.Main_Frame)\r\n self.label_3.setGeometry(QtCore.QRect(580, 20, 211, 21))\r\n font = QtGui.QFont()\r\n font.setPointSize(10)\r\n font.setBold(True)\r\n font.setWeight(75)\r\n self.label_3.setFont(font)\r\n self.label_3.setObjectName(_fromUtf8(\"label_3\"))\r\n self.label_4 = QtGui.QLabel(self.Main_Frame)\r\n self.label_4.setGeometry(QtCore.QRect(580, 110, 71, 21))\r\n font = QtGui.QFont()\r\n font.setPointSize(10)\r\n self.label_4.setFont(font)\r\n self.label_4.setObjectName(_fromUtf8(\"label_4\"))\r\n self.label_5 = QtGui.QLabel(self.Main_Frame)\r\n self.label_5.setGeometry(QtCore.QRect(580, 170, 111, 21))\r\n font = QtGui.QFont()\r\n font.setPointSize(10)\r\n self.label_5.setFont(font)\r\n self.label_5.setObjectName(_fromUtf8(\"label_5\"))\r\n self.title_text = QtGui.QLineEdit(self.Main_Frame)\r\n self.title_text.setGeometry(QtCore.QRect(580, 70, 201, 21))\r\n self.title_text.setObjectName(_fromUtf8(\"title_text\"))\r\n self.author_text = QtGui.QLineEdit(self.Main_Frame)\r\n self.author_text.setGeometry(QtCore.QRect(580, 130, 201, 21))\r\n self.author_text.setObjectName(_fromUtf8(\"author_text\"))\r\n self.year_text = QtGui.QLineEdit(self.Main_Frame)\r\n self.year_text.setGeometry(QtCore.QRect(580, 190, 101, 21))\r\n self.year_text.setObjectName(_fromUtf8(\"year_text\"))\r\n self.table.raise_()\r\n self.add_button.raise_()\r\n self.label_2.raise_()\r\n self.label_3.raise_()\r\n self.label_4.raise_()\r\n self.label_5.raise_()\r\n self.title_text.raise_()\r\n self.author_text.raise_()\r\n self.year_text.raise_()\r\n self.del_button = QtGui.QPushButton(Form)\r\n self.del_button.setGeometry(QtCore.QRect(260, 640, 75, 23))\r\n self.del_button.setObjectName(_fromUtf8(\"del_button\"))\r\n self.del_ID_text = QtGui.QLineEdit(Form)\r\n self.del_ID_text.setGeometry(QtCore.QRect(140, 640, 111, 21))\r\n self.del_ID_text.setObjectName(_fromUtf8(\"del_ID_text\"))\r\n self.label = QtGui.QLabel(Form)\r\n self.label.setGeometry(QtCore.QRect(20, 640, 121, 21))\r\n self.label.setObjectName(_fromUtf8(\"label\"))\r\n self.retranslateUi(Form)\r\n QtCore.QMetaObject.connectSlotsByName(Form)\r\n\r\n def retranslateUi(self, Form):\r\n Form.setWindowTitle(_translate(\"Form\", \"Book Store Management System\", None))\r\n self.table.setSortingEnabled(True)\r\n self.add_button.setText(_translate(\"Form\", \"Add Book\", None))\r\n self.label_2.setText(_translate(\"Form\", \"Book Title\", None))\r\n self.label_3.setText(_translate(\"Form\", \"Enter Details of the book to add\", None))\r\n self.label_4.setText(_translate(\"Form\", \"Book Author\", None))\r\n self.label_5.setText(_translate(\"Form\", \"Year of Publishing\", None))\r\n self.del_button.setText(_translate(\"Form\", \"Delete Book\", None))\r\n self.label.setText(_translate(\"Form\", \"Enter Book ID to delete\", None))\r\n\r\n#update table for every change in database\r\ndef update_table():\r\n ui.table.clear()\r\n ui.table.setRowCount(0)\r\n rows = list(c.execute(\"SELECT * FROM books\"))\r\n for rowPosition in range(len(rows)):\r\n ui.table.insertRow(rowPosition)\r\n ui.table.setItem(rowPosition , 0, QtGui.QTableWidgetItem(rows[rowPosition][0]))\r\n ui.table.setItem(rowPosition , 1, 
QtGui.QTableWidgetItem(str(rows[rowPosition][1])))\r\n ui.table.setItem(rowPosition , 2, QtGui.QTableWidgetItem(rows[rowPosition][2]))\r\n ui.table.setItem(rowPosition , 3, QtGui.QTableWidgetItem(rows[rowPosition][3]))\r\n ui.table.setHorizontalHeaderItem(0, QtGui.QTableWidgetItem(\"Book Title\"))\r\n ui.table.setHorizontalHeaderItem(1, QtGui.QTableWidgetItem(\"Book ID\"))\r\n ui.table.setHorizontalHeaderItem(2, QtGui.QTableWidgetItem(\"Book Author\"))\r\n ui.table.setHorizontalHeaderItem(3, QtGui.QTableWidgetItem(\"Year of Publishing\"))\r\n\r\n#add book to database (parameterized query avoids SQL injection)\r\ndef add_book():\r\n c.execute(\"INSERT INTO books(book_title,book_author,book_year) VALUES(?,?,?)\", (ui.title_text.text(), ui.author_text.text(), ui.year_text.text()))\r\n conn.commit()\r\n update_table()\r\n\r\n#delete book from database (parameterized for the same reason)\r\ndef del_book():\r\n book_id = ui.del_ID_text.text()\r\n c.execute(\"DELETE FROM books WHERE book_id = ?\", (book_id,))\r\n conn.commit()\r\n update_table()\r\n\r\nif __name__ == \"__main__\":\r\n import sys\r\n app = QtGui.QApplication(sys.argv)\r\n Form = QtGui.QWidget()\r\n ui = Ui_Form()\r\n ui.setupUi(Form)\r\n ui.add_button.clicked.connect(add_book)\r\n ui.del_button.clicked.connect(del_book)\r\n Form.show()\r\n update_table()\r\n sys.exit(app.exec_())\r\n \r\n" } ]
2
matbur/dnn
https://github.com/matbur/dnn
1e4facd9864218ac7483b4912020e18f23c50811
6379618f23069fa6e0e4c0e20de4d776e70b7e33
00e137e73d6846171aa4c2e58aa2b8f3d2d2944a
refs/heads/master
2022-12-14T03:44:33.037363
2019-10-22T07:33:55
2019-10-22T07:33:55
155,981,107
0
0
MIT
2018-11-03T12:29:22
2019-10-22T07:33:58
2022-12-08T06:35:03
Python
[ { "alpha_fraction": 0.5589873194694519, "alphanum_fraction": 0.5635443329811096, "avg_line_length": 24.483871459960938, "blob_id": "6f8ead5a8ee263d435455928ae326026d4698698", "content_id": "634229a9af87eccfb756d3daf6fcfafe91320b35", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3950, "license_type": "permissive", "max_line_length": 90, "num_lines": 155, "path": "/sdnn/layers.py", "repo_name": "matbur/dnn", "src_encoding": "UTF-8", "text": "from pathlib import Path\nfrom typing import Optional, Tuple\n\nimport numpy as np\n\nfrom . import activation as act\nfrom .logger import create_logger\nfrom .schemas import NetworkSchema\n\nlogger = create_logger(\n __name__,\n # con_level='DEBUG',\n file_level='INFO',\n filename=Path(__file__).with_suffix('.log'),\n)\n\n\nclass Layer:\n id = 0\n\n def __init__(self, shape, activation='sigmoid'):\n self.shape = shape\n self.n_inputs = shape[0]\n self.n_outputs = shape[1]\n\n self.activation_name = activation\n self.activation = self.parse_activation()\n\n self.tab: np.ndarray = None\n if shape[0] is not None:\n self.tab = np.random.rand(shape[0] + 1, shape[1]) - .5\n # self.tab = next(weights)\n\n self.previous: Layer = None\n self.next: Layer = None\n\n self.is_first = False\n self.is_last = False\n\n self.y: np.ndarray = None\n self.z: np.ndarray = None\n self.delta: np.ndarray = None\n self.gradient: np.ndarray = None\n\n self.id = Layer.id\n Layer.id += 1\n\n @property\n def W(self):\n if self.tab is None:\n return None\n return self.tab[1:]\n\n @property\n def b(self):\n if self.tab is None:\n return None\n return self.tab[:1]\n\n def parse_activation(self):\n return getattr(act, self.activation_name)\n\n @staticmethod\n def _add_bias(arr: np.ndarray):\n return np.c_[np.ones(arr.shape[0]), arr]\n\n def feedforward(self, x: np.ndarray) -> np.ndarray:\n if self.is_first:\n # if x is 1-D vector, add dimension\n if len(x.shape) == 1:\n x = x[np.newaxis]\n x = self._add_bias(x)\n self.y = x\n return self.next.feedforward(x)\n\n logger.debug('Layer {.id}: got input\\n{!r}'.format(self, x))\n z = x @ self.tab\n y = self.activation(z)\n logger.debug('Layer {.id}: returns\\n{!r}'.format(self, y))\n\n if not self.is_last:\n y = self._add_bias(y)\n self.y = y\n self.z = z\n\n if self.is_last:\n return y\n return self.next.feedforward(y)\n\n def calc_delta(self, d: np.ndarray = None):\n if self.is_first:\n return\n\n if self.is_last:\n d = d[None]\n delta = (self.y - d) * self.activation(self.z, True)\n self.delta = delta\n return self.previous.calc_delta()\n\n delta = (self.next.delta[0] @ self.next.tab.T)[1:] * self.activation(self.z, True)\n self.delta = delta\n\n self.previous.calc_delta()\n\n def calc_gradient(self):\n if self.is_first:\n return\n\n gradient = self.previous.y.T @ self.delta\n self.gradient = gradient\n\n self.previous.calc_gradient()\n\n def update_weights(self, learning_rate=.2):\n if self.is_first:\n return\n\n self.tab -= self.gradient * learning_rate\n\n self.previous.update_weights(learning_rate)\n\n def load(self, tabs: NetworkSchema):\n if self.is_first:\n return\n\n tab = np.array(tabs.pop())\n assert tab.shape == self.tab.shape, f'{tab.shape} != {self.tab.shape}'\n self.tab = tab\n\n self.previous.load(tabs)\n\n def __repr__(self):\n if None in self.shape:\n return f'Layer {self.id}: shape:{self.shape}'\n return f'Layer {self.id}: tab:{self.tab.shape}\\n{self.tab} = tab'\n\n\ndef input_data(shape: Tuple[Optional[int], int]) -> Layer:\n layer = Layer(shape)\n 
layer.is_first = True\n return layer\n\n\ndef fully_connected(incoming: Layer, n_units: int, activation='relu') -> Layer:\n shape = (incoming.n_outputs, n_units)\n layer = Layer(shape, activation)\n layer.previous = incoming\n layer.is_last = True\n incoming.next = layer\n incoming.is_last = False\n return layer\n\n\ndef dropout(incoming: Layer, keep_prob=.8) -> Layer:\n pass\n" }, { "alpha_fraction": 0.8067227005958557, "alphanum_fraction": 0.8067227005958557, "avg_line_length": 22.799999237060547, "blob_id": "73eb35cf52356c8ccd3bc025ff8283beeec75a1c", "content_id": "8349256563b8b4c78b3b7cdafe91df9d453f14f3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 119, "license_type": "permissive", "max_line_length": 33, "num_lines": 5, "path": "/sdnn/schemas.py", "repo_name": "matbur/dnn", "src_encoding": "UTF-8", "text": "from typing import List\n\nNeuronSchema = List[float]\nLayerSchema = List[NeuronSchema]\nNetworkSchema = List[LayerSchema]\n" }, { "alpha_fraction": 0.4543147087097168, "alphanum_fraction": 0.5279187560081482, "avg_line_length": 24.419355392456055, "blob_id": "c7b2ba96a547a5775204925463deba95353168d3", "content_id": "2bd7979ea2582522bdbd32f0963220de582c01ab", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 788, "license_type": "permissive", "max_line_length": 50, "num_lines": 31, "path": "/sdnn/tests/test_activation.py", "repo_name": "matbur/dnn", "src_encoding": "UTF-8", "text": "from inz.activation import relu\n\nimport numpy as np\n\n\ndef test_relu_matrix():\n arr = np.array([[1, -1, .5, -.5, 1e-6, -1e6]])\n want = np.array([[1, 0, .5, 0, 1e-6, 0]])\n get = relu(arr)\n assert np.array_equal(want, get)\n\n\ndef test_relu_vector():\n arr = np.array([1, -1, .5, -.5, 1e-6, -1e6])\n want = np.array([1, 0, .5, 0, 1e-6, 0])\n get = relu(arr)\n assert np.array_equal(want, get)\n\n\ndef test_relu_der_matrix():\n arr = np.array([[1, -1, .5, -.5, 1e-6, -1e6]])\n want = np.array([[1., 0., 1., 0., 1., 0.]])\n get = relu(arr, True)\n assert np.array_equal(want, get)\n\n\ndef test_relu_der_vector():\n arr = np.array([1, -1, .5, -.5, 1e-6, -1e6])\n want = np.array([1., 0., 1., 0., 1., 0.])\n get = relu(arr, True)\n assert np.array_equal(want, get)\n" }, { "alpha_fraction": 0.50737464427948, "alphanum_fraction": 0.5319567322731018, "avg_line_length": 16.534482955932617, "blob_id": "8b9f7e44ee76cb011d7833c868fb5edc1e37063e", "content_id": "1d78dd533c294ebfd39658fcfa913858b11b07e8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1017, "license_type": "permissive", "max_line_length": 57, "num_lines": 58, "path": "/sdnn/activation.py", "repo_name": "matbur/dnn", "src_encoding": "UTF-8", "text": "\"\"\" This module contains all activation functions.\n\nsource: https://en.wikipedia.org/wiki/Activation_function\n\"\"\"\n\nimport numpy as np\n\n\ndef identity(x, der=False):\n if der:\n return 1\n return x\n\n\ndef binary_step(x, der=False):\n if der:\n if x == 0:\n raise ValueError('?')\n return 0\n\n return x >= 0\n\n\ndef sigmoid(x, der=False):\n if der:\n return np.exp(x) / (1 + np.exp(x)) ** 2\n return 1 / (1 + np.exp(-x))\n\n\ndef tanh(x, der=False):\n if der:\n return 1 - tanh(x) ** 2\n return np.tanh(x)\n\n\ndef arctan(x, der=False):\n if der:\n return 1 / (1 + x ** 2)\n return np.arctan(x)\n\n\ndef soft_sign(x, der=False):\n if der:\n return 1 / (1 + abs(x)) ** 2\n return x / (1 + abs(x))\n\n\ndef 
relu(x, der=False):\n if der:\n return x * (x > 0) / x\n return np.maximum(0, x)\n\n\ndef softmax(x):\n rows_max = np.max(x, axis=1).reshape(-1, 1)\n e_x = np.exp(x - rows_max)\n div = np.sum(e_x, axis=1).reshape(-1, 1)\n return e_x / div\n" }, { "alpha_fraction": 0.5284959077835083, "alphanum_fraction": 0.5470035076141357, "avg_line_length": 25.184616088867188, "blob_id": "f33753eab501b1c566ba83d28539566a3a553930", "content_id": "59936aee2260687449bfabf5ec14f05301eda54b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3404, "license_type": "permissive", "max_line_length": 105, "num_lines": 130, "path": "/main.py", "repo_name": "matbur/dnn", "src_encoding": "UTF-8", "text": "from datetime import datetime\nfrom multiprocessing import cpu_count\nfrom multiprocessing.pool import Pool\nfrom pathlib import Path\n\nimport numpy as np\n\nfrom sdnn import Model, fully_connected, input_data\nfrom sdnn.utils import get_data, train_test_split, vector2onehot\n\n\ndef xor_problem():\n np.random.seed(1)\n x = np.array([\n [1, 1], [1, 0], [0, 1], [0, 0],\n ], dtype=float)\n y = np.array([\n [1, 0], [0, 1], [0, 1], [1, 0],\n ], dtype=float)\n\n net = input_data((None, 2))\n net = fully_connected(net, 3, activation='tanh')\n net = fully_connected(net, 2, activation='tanh')\n\n model = Model(net)\n model.fit(x, y, n_epoch=200)\n model.save('xor_model.json')\n model.load('xor_model.json')\n\n for i in zip(y, model.predict(x)):\n print(*i)\n\n model.plot_error()\n\n\ndef get_accuracy(pred, y):\n axis_ = np.argmax(pred, axis=1) - np.argmax(y, axis=1)\n return 1 - np.count_nonzero(axis_) / len(y)\n\n\ndef create_network(n, shapes, activation, seed=42):\n np.random.seed(seed)\n\n net = input_data(shape=(None, n))\n for i in shapes:\n net = fully_connected(net, i, activation)\n return net\n\n\ndef test_case(shapes, activation, n_features, batch_size, learning_rate, n_epoch, model_dir, seed=42):\n name = 's_{}_a_{}_f_{}_bs_{}_lr_{}'.format(\n '_'.join(map(str, shapes)), activation, n_features, batch_size, '_'.join(map(str, learning_rate))\n )\n print(name)\n network = create_network(n_features, shapes, activation, seed=seed)\n\n data = get_data(n_features)\n\n train, test = train_test_split(data, seed=seed)\n\n x_train = train[:, :-1]\n y_train = train[:, -1] - 1\n y_train = vector2onehot(y_train)\n x_test = test[:, :-1]\n y_test = test[:, -1] - 1\n y_test = vector2onehot(y_test)\n\n model = Model(network)\n model.fit(x_train, y_train,\n validation_set=(x_test, y_test),\n n_epoch=n_epoch,\n batch_size=batch_size,\n learning_rate=learning_rate,\n train_file=f'{model_dir}/{name}_train.json',\n )\n\n model_fn = f'{model_dir}/{name}_model.json'\n model.save(model_fn)\n # model.load(model_fn)\n\n # for i, j in zip(model.predict(x_test), y_test):\n # print(np.argmax(i), np.argmax(j))\n\n print(get_accuracy(model.predict(x_test), y_test))\n\n # model.plot_error()\n\n\ndef wrapper(x):\n return test_case(**x)\n\n\ndef prepare_test_cases():\n model_dir = Path('results') / datetime.now().strftime('%s')\n Path(model_dir).mkdir(exist_ok=True, parents=True)\n for act in ['sigmoid']:\n for feat in [30]:\n for shape in [[16, 12, 8]]:\n for lr in [[.2, .01], [.2, .001]]:\n yield {\n 'shapes': shape,\n 'activation': act,\n 'n_features': feat,\n 'batch_size': 10,\n 'learning_rate': lr,\n 'n_epoch': 10,\n 'model_dir': model_dir,\n }\n\n\ndef run_all():\n np.random.seed(42)\n\n cpus = min(cpu_count(), 16)\n cases = prepare_test_cases()\n\n print(f'Running on {cpus} 
CPUs')\n\n with Pool(cpus) as pool:\n pool.map(wrapper, cases)\n\n\nif __name__ == '__main__':\n from time import time\n\n t0 = time()\n run_all()\n # xor_problem()\n t = time() - t0\n print(f'Done in {t} s')\n" }, { "alpha_fraction": 0.6589229106903076, "alphanum_fraction": 0.6589229106903076, "avg_line_length": 25.30555534362793, "blob_id": "b2ad7c0eacecd83e911b87307d0b3b9f8e5709d3", "content_id": "2862e89cc18cd26625bfa05e9e41ad2ffa91e09e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 947, "license_type": "permissive", "max_line_length": 89, "num_lines": 36, "path": "/sdnn/logger.py", "repo_name": "matbur/dnn", "src_encoding": "UTF-8", "text": "import logging\n\n\ndef get_level(level: str):\n level = level.upper()\n return getattr(logging, level)\n\n\ndef create_logger(name: str = None, con_level='INFO', file_level='DEBUG', filename=None):\n con_level = get_level(con_level)\n file_level = get_level(file_level)\n\n fmt = '%(asctime)s|%(name)s|%(funcName)s:%(lineno)d|%(levelname)s|%(message)s'\n\n formatter = logging.Formatter(fmt)\n logger = logging.getLogger(name)\n logger.setLevel(logging.DEBUG)\n\n ch = logging.StreamHandler()\n ch.setFormatter(formatter)\n ch.setLevel(con_level)\n logger.addHandler(ch)\n\n if filename is not None:\n fh = logging.FileHandler(filename)\n fh.setFormatter(formatter)\n fh.setLevel(file_level)\n logger.addHandler(fh)\n\n logger.debug('New logger created')\n return logger\n\n\nif __name__ == '__main__':\n logger = create_logger(con_level='DEBUG', filename='somefile.log')\n logger.debug('hello world')\n" }, { "alpha_fraction": 0.564338207244873, "alphanum_fraction": 0.6047794222831726, "avg_line_length": 17.133333206176758, "blob_id": "fe67760c95d23f95d8df4e9f227bc4bcffcdc2b7", "content_id": "0bc309e758fda837f33425a5d58ce5344223ccdc", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 544, "license_type": "permissive", "max_line_length": 62, "num_lines": 30, "path": "/README.md", "repo_name": "matbur/dnn", "src_encoding": "UTF-8", "text": "# dnn\n\n## Usage\n\n```bash\npip install sdnn\n```\n\n```python\nimport numpy as np\n\nimport sdnn\n\nx = np.array([ [1, 1], [1, 0], [0, 1], [0, 0], ], dtype=float)\ny = np.array([ [1, 0], [0, 1], [0, 1], [1, 0], ], dtype=float)\n\nnet = sdnn.input_data((None, 2))\nnet = sdnn.fully_connected(net, 3, activation='tanh')\nnet = sdnn.fully_connected(net, 2, activation='tanh')\n\nmodel = sdnn.Model(net)\nmodel.fit(x, y, n_epoch=200)\nmodel.save('xor_model.json')\nmodel.load('xor_model.json')\n\nfor i in zip(y, model.predict(x)):\n print(*i)\n\nmodel.plot_error()\n```\n" }, { "alpha_fraction": 0.5173267126083374, "alphanum_fraction": 0.5504950284957886, "avg_line_length": 24.732484817504883, "blob_id": "d38d0b319111ba4755b17adc0837c796072582c4", "content_id": "648a55e4d03e0a1260cec1b579e77e89a43b04dd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4040, "license_type": "permissive", "max_line_length": 69, "num_lines": 157, "path": "/sdnn/tests/test_utils.py", "repo_name": "matbur/dnn", "src_encoding": "UTF-8", "text": "import numpy as np\nimport pandas as pd\nimport pytest\nfrom sklearn.feature_selection import SelectKBest, chi2 as sk_chi2\n\nfrom inz.utils import chi2, select_k_best, split, train_test_split\n\n\ndef test_split_list_int():\n ints = list(range(7))\n want = [[0, 1, 2], [3, 4, 5], [6]]\n get = list(split(ints, 3))\n 
assert len(get) == len(want)\n assert get == want\n\n\ndef test_split_int():\n ints = range(7)\n want = [[0, 1, 2], [3, 4, 5], [6]]\n get = list(split(ints, 3))\n assert len(get) == len(want)\n assert get == want\n\n\ndef test_split_list_int_greater_width():\n ints = list(range(3))\n want = [[0, 1, 2]]\n get = list(split(ints, 4))\n assert len(get) == len(want)\n assert get == want\n\n\ndef test_split_list_str():\n strings = list(map(str, range(6)))\n want = [['0', '1'], ['2', '3'], ['4', '5']]\n get = list(split(strings, 2))\n assert len(get) == len(want)\n assert get == want\n\n\ndef test_str():\n string = ''.join(map(str, range(6)))\n want = [['0', '1'], ['2', '3'], ['4', '5']]\n get = list(split(string, 2))\n assert len(get) == len(want)\n assert get == want\n\n\ndef test_split_ndarray_int():\n array = np.arange(10, dtype=int).reshape(-1, 2)\n want = [np.array([[0, 1], [2, 3]]),\n np.array([[4, 5], [6, 7]]),\n np.array([[8, 9]])]\n get = list(split(array, 2))\n assert len(get) == len(want)\n for i, j in zip(get, want):\n assert type(i) == type(j)\n assert np.array_equal(i, j)\n\n\ndef test_split_generator_str():\n strings = map(str, range(6))\n want = [['0', '1'], ['2', '3'], ['4', '5']]\n get = list(split(strings, 2))\n assert len(get) == len(want)\n assert get == want\n\n\ndef test_split_list_int_not_allow():\n ints = list(range(7))\n want = [[0, 1, 2], [3, 4, 5]]\n get = list(split(ints, 3, False))\n assert len(get) == len(want)\n assert get == want\n\n\ndef test_split_list_int_greater_width_not_allow():\n ints = list(range(3))\n want = []\n get = list(split(ints, 4, False))\n assert len(get) == len(want)\n assert get == want\n\n\ndef test_split_list_str_not_allow():\n strings = list(map(str, range(6)))\n want = [['0', '1'], ['2', '3'], ['4', '5']]\n get = list(split(strings, 2, False))\n assert len(get) == len(want)\n assert get == want\n\n\ndef test_split_ndarray_int_not_allow():\n array = np.arange(10, dtype=int).reshape(-1, 2)\n want = [np.array([[0, 1], [2, 3]]),\n np.array([[4, 5], [6, 7]])]\n get = list(split(array, 2, False))\n assert len(get) == len(want)\n for i, j in zip(get, want):\n assert type(i) == type(j)\n assert np.array_equal(i, j)\n\n\ndef test_split_generator_str_not_allow():\n strings = map(str, range(6))\n want = [['0', '1'], ['2', '3'], ['4', '5']]\n get = list(split(strings, 2, False))\n assert len(get) == len(want)\n assert get == want\n\n\n@pytest.fixture\ndef data():\n X = pd.read_csv('../../data/data.csv')\n y = X.pop('Choroba')\n return X.values, y.values\n\n\ndef test_chi2(data):\n X, y = data\n sk_val, _ = sk_chi2(X, y)\n my_val = chi2(X, y)\n\n np.testing.assert_equal(sk_val, my_val)\n\n\ndef test_select_k_best(data):\n X, y = data\n for i in range(1, 31):\n sk_sup1 = SelectKBest(sk_chi2, i).fit(X, y).get_support()\n sk_sup2 = SelectKBest(sk_chi2, i).fit(X, y).get_support(True)\n\n my_sup1 = select_k_best(X, y, k=i)\n my_sup2 = select_k_best(X, y, k=i, indices=True)\n\n np.testing.assert_equal(sk_sup1, my_sup1, str(i))\n np.testing.assert_equal(sk_sup2, sorted(my_sup2), str(i))\n\n\ndef test_train_test_split():\n x = np.arange(10)\n get = train_test_split(x, shuffle=False)\n want = [np.arange(7), np.arange(7, 10)]\n for i in zip(get, want):\n np.testing.assert_equal(*i)\n\n\ndef test_train_test_split5():\n x = np.arange(10)\n get = train_test_split(x, test_size=.5, shuffle=False)\n want = [np.arange(5), np.arange(5, 10)]\n for i in zip(get, want):\n np.testing.assert_equal(*i)\n\n\nif __name__ == '__main__':\n pytest.main()\n" }, { "alpha_fraction": 
0.800000011920929, "alphanum_fraction": 0.800000011920929, "avg_line_length": 34, "blob_id": "a8fda73c59c47bb35df537c5ef4ee59a6e33f82e", "content_id": "bc50bea173696644e6aab5a636e837b340026233", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 105, "license_type": "permissive", "max_line_length": 47, "num_lines": 3, "path": "/sdnn/__init__.py", "repo_name": "matbur/dnn", "src_encoding": "UTF-8", "text": "from . import activation as act\nfrom .layers import fully_connected, input_data\nfrom .model import Model\n" }, { "alpha_fraction": 0.58591628074646, "alphanum_fraction": 0.5948885083198547, "avg_line_length": 24.020408630371094, "blob_id": "6631c6aa662ff3d23cf0a488421d5605e040233d", "content_id": "e443a1315323bba7776bd7d7c6e33add6d31132d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3678, "license_type": "permissive", "max_line_length": 84, "num_lines": 147, "path": "/sdnn/utils.py", "repo_name": "matbur/dnn", "src_encoding": "UTF-8", "text": "\"\"\" Module contains common functions used in project. \"\"\"\nfrom itertools import islice\nfrom typing import Iterable, Union\n\nimport numpy as np\nimport pandas as pd\n\nfrom .layers import Layer\n\n\ndef old_split(iterable: Iterable, width: int, allow_missing=True):\n \"\"\" Generator yields iterable in parts.\n\n :param iterable: iterable to split\n :param width: length of each part\n :param allow_missing: if True last part may be smaller\n \"\"\"\n it = iter(iterable)\n flag = True\n while flag:\n retval = []\n flag = False\n for _ in range(width):\n try:\n retval.append(next(it))\n except StopIteration:\n if not allow_missing:\n return\n break\n else:\n flag = True\n\n if not retval:\n return\n\n if isinstance(iterable, np.ndarray):\n retval = np.array(retval)\n\n yield retval\n\n\ndef split(iterable: Iterable, width: int, allow_missing=True):\n \"\"\" Generator yields iterable in parts.\n\n :param iterable: iterable to split\n :param width: length of each part\n :param allow_missing: if True last part may be smaller\n \"\"\"\n it = iter(iterable)\n while True:\n retval = list(islice(it, width))\n\n if not retval:\n return\n if len(retval) != width and not allow_missing:\n return\n\n if isinstance(iterable, np.ndarray):\n retval = np.array(retval)\n\n yield retval\n\n\ndef iter_layers(network: Layer, attr, with_values=True, i=0):\n if network is None:\n return\n if i == 0:\n print()\n print(attr)\n values = getattr(network, attr)\n previous = network.previous\n\n shape = values.shape if values is not None else None\n print('Layer: {}, i: {} {}.shape: {}'.format(network.id, i, attr, shape))\n if with_values:\n print(values)\n iter_layers(previous, attr, with_values, i + 1)\n\n\ndef get_loss(pred, y) -> np.ndarray:\n return np.sum((pred - y) ** 2) / 2\n\n\ndef get_accuracy(pred, y):\n axis_ = np.argmax(pred, axis=1) - np.argmax(y, axis=1)\n return 1 - np.count_nonzero(axis_) / len(y)\n\n\ndef vector2onehot(vector: np.ndarray):\n unique = len(set(vector))\n length = len(vector)\n data = np.zeros((length, unique))\n data[range(length), vector] = 1\n return data\n\n\ndef chi2(X: np.ndarray, y: np.ndarray):\n \"\"\"https://pl.wikipedia.org/wiki/Test_zgodno%C5%9Bci_chi-kwadrat#Zliczenia\"\"\"\n\n Y = vector2onehot(y - 1)\n observed = Y.T @ X # n_classes * n_features\n\n feature_count = X.sum(axis=0).reshape(1, -1)\n class_prob = Y.mean(axis=0).reshape(-1, 1)\n expected = class_prob @ feature_count\n\n val = 
(observed - expected) ** 2 / expected\n return val.sum(axis=0)\n\n\ndef select_k_best(X, y, func=chi2, k=10, indices=False):\n scores = func(X, y)\n mask = np.zeros_like(scores, dtype=bool)\n args = np.argsort(scores)[-k:]\n if indices:\n return args\n mask[args] = 1\n return mask\n\n\ndef train_test_split(*arrays, test_size=.25, shuffle=True, seed: Union[int] = None):\n if seed is not None:\n np.random.seed(seed)\n\n n = len(arrays[0])\n order = np.random.permutation(n) if shuffle else np.arange(n)\n k = int(np.ceil(n * test_size))\n order_train = order[:-k]\n order_test = order[-k:]\n l = []\n for i in arrays:\n l.append(i[order_train])\n l.append(i[order_test])\n return l\n\n\ndef get_data(num_features=20):\n X = pd.read_csv('./data/data.csv')\n y = X.pop('Choroba')\n\n sup = select_k_best(X.values, y.values, k=num_features)\n\n X = X.drop(X.columns[~sup], axis=1)\n\n X['Choroba'] = y\n\n return X.values\n" }, { "alpha_fraction": 0.516984760761261, "alphanum_fraction": 0.5230916142463684, "avg_line_length": 31.147239685058594, "blob_id": "1120e8a2bd7018507447c11700027233b8177362", "content_id": "742fb339dd6124078503866bd2924f9e8a1766a3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5240, "license_type": "permissive", "max_line_length": 114, "num_lines": 163, "path": "/sdnn/model.py", "repo_name": "matbur/dnn", "src_encoding": "UTF-8", "text": "import json\nfrom itertools import repeat\nfrom pathlib import Path\nfrom time import time\nfrom typing import Iterator, Tuple, Union\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom .layers import Layer\nfrom .logger import create_logger\nfrom .schemas import NetworkSchema\nfrom .utils import get_accuracy, get_loss, iter_layers, split\n\nlogger = create_logger(\n __name__,\n con_level='INFO',\n file_level='INFO',\n filename=Path(__file__).with_suffix('.log')\n)\n\n\nclass Model:\n def __init__(self, network: Layer):\n self.network = network\n\n layer = network\n while layer.previous is not None:\n layer = layer.previous\n self.input = layer\n\n self.errors = []\n\n def fit(self, X_inputs: np.ndarray, Y_targets: np.ndarray,\n validation_set: Tuple[np.ndarray, np.ndarray] = None,\n learning_rate=.2, n_epoch=10, batch_size=64,\n shuffle=False, train_file='train.json'):\n\n xlen = len(X_inputs)\n step = 0\n order = np.random.permutation(xlen) if shuffle else np.arange(xlen)\n\n if validation_set is None:\n validation_set = X_inputs, Y_targets\n\n lr_iter = self._parse_learning_rate(learning_rate, n_epoch)\n\n training = []\n testing = []\n for epoch in range(1, n_epoch + 1):\n batches_x = split(X_inputs[order], batch_size)\n batches_y = split(Y_targets[order], batch_size)\n err = []\n num, den = 0, 0\n lr = next(lr_iter)\n for i, (batch_x, batch_y) in enumerate(zip(batches_x, batches_y)):\n step += 1\n t0 = time()\n for x, y in zip(batch_x, batch_y):\n self.input.feedforward(x)\n self.network.calc_delta(y)\n self.network.calc_gradient()\n self.network.update_weights(lr)\n\n predict = self.predict(batch_x)\n e = get_loss(predict, batch_y)\n err.append(e)\n\n l = len(batch_x)\n num += l - np.count_nonzero(predict.argmax(axis=1) - batch_y.argmax(axis=1))\n den += l\n acc = num / den\n iteration = i * batch_size\n\n t = time() - t0\n logger.debug(f'Training Step: {step:<4} | total loss: {e:.5f} | time: {t:.3f}s')\n logger.debug(f'lr: {lr:.3f} |epoch: {epoch:0>3} | acc: {acc:.4f} -- iter: {iteration:0>3}/{xlen}')\n training.append({\n 'step': step,\n 'total_loss': e,\n 
'time': t,\n 'epoch': epoch,\n 'accuracy': acc,\n 'iteration': iteration,\n 'len': xlen,\n 'lr': lr,\n })\n\n test_predict = self.predict(validation_set[0])\n test_y = validation_set[1]\n test_acc = get_accuracy(test_predict, test_y)\n test_loss = get_loss(test_predict, test_y)\n\n logger.debug('--')\n logger.info(f'End of epoch: {epoch:0>3} | val_loss: {test_loss:.5f} | val_acc: {test_acc:.4f}')\n logger.debug('--')\n testing.append({\n 'test_loss': test_loss,\n 'epoch': epoch,\n 'test_accuracy': test_acc,\n })\n mean = np.mean(err)\n self.errors.append(mean)\n data = json.dumps({'training': training, 'testing': testing})\n Path(train_file).write_text(data)\n\n @staticmethod\n def _parse_learning_rate(lr: Union[float, list, tuple], n_epoch=None) -> Iterator:\n if isinstance(lr, float):\n return repeat(lr)\n if isinstance(lr, (list, tuple)):\n assert len(lr) == 2\n return iter(np.linspace(*lr, n_epoch))\n raise TypeError('LR should be float, tuple or list')\n\n def _apply_lr(self, lr):\n layer = self.network\n while layer.previous is not None:\n layer.learning_rate = lr\n layer = layer.previous\n\n def get_weights(self) -> NetworkSchema:\n data = []\n layer = self.network\n while layer.previous is not None:\n tab = layer.tab.tolist()\n data.append(tab)\n layer = layer.previous\n return data\n\n def load(self, model_file: str):\n data_text = Path(model_file).read_text()\n data = json.loads(data_text)\n self.set_weights(data)\n\n def save(self, model_file: str):\n data = self.get_weights()\n\n with open(model_file, 'w') as f:\n json.dump(data, f)\n\n def predict(self, x: np.ndarray) -> np.ndarray:\n return self.input.feedforward(x)\n\n def predict_label(self, x: np.ndarray) -> int:\n pass\n\n def set_weights(self, tensor: NetworkSchema):\n self.network.load(tensor[::-1])\n\n def plot_error(self):\n plt.grid()\n y = self.errors\n x = range(len(y))\n plt.plot(x, y)\n plt.scatter(x, y)\n plt.show()\n\n def show(self, param, with_values=True):\n if isinstance(param, str):\n param = [param]\n for i in param:\n iter_layers(self.network, i, with_values)\n" } ]
11
adnrs96/runtime
https://github.com/adnrs96/runtime
ec3dc173f59b5c4cc1c60e8ed3b4bf2c065e117b
e824224317e6aa108cf06968474fc44fa33488d6
2859435d5680743bd28dfd7294d069d1c5dedb11
refs/heads/master
2020-07-19T21:46:43.154428
2019-09-05T04:51:45
2019-09-05T04:51:45
206,519,757
0
0
Apache-2.0
2019-09-05T08:58:53
2019-09-05T04:51:48
2019-09-05T04:53:25
null
[ { "alpha_fraction": 0.6777163743972778, "alphanum_fraction": 0.6795580387115479, "avg_line_length": 23.68181800842285, "blob_id": "8ebf5ce2ed8a3da1c2180d3425acea71ff07c564", "content_id": "d946537944fd1f9ffb19290875493c975cfb0122", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 543, "license_type": "permissive", "max_line_length": 72, "num_lines": 22, "path": "/storyruntime/processing/internal/Json.py", "repo_name": "adnrs96/runtime", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport json\n\nfrom .Decorators import Decorators\n\n\n@Decorators.create_service(name='json', command='stringify', arguments={\n 'content': {'type': 'string'}\n}, output_type='any')\nasync def stringify(story, line, resolved_args):\n return json.dumps(resolved_args['content'])\n\n\n@Decorators.create_service(name='json', command='parse', arguments={\n 'content': {'type': 'string'}\n}, output_type='any')\nasync def parse(story, line, resolved_args):\n return json.loads(resolved_args['content'])\n\n\ndef init():\n pass\n" }, { "alpha_fraction": 0.6605504751205444, "alphanum_fraction": 0.6651375889778137, "avg_line_length": 17.16666603088379, "blob_id": "3fd5dbf48b1c59fd93c02f895b0328096f02e922", "content_id": "2e2230f01e5491182e9c4a4790530f1ef13a605a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 218, "license_type": "permissive", "max_line_length": 77, "num_lines": 12, "path": "/bench/Benchmark.py", "repo_name": "adnrs96/runtime", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n\nclass Benchmark:\n\n \"\"\"\n Placeholder for future benchmarking functionalities.\n\n It should benchmark running N containers versus running N containers from\n stories.\n \"\"\"\n pass\n" }, { "alpha_fraction": 0.6434065699577332, "alphanum_fraction": 0.6439560651779175, "avg_line_length": 25.376811981201172, "blob_id": "85aed5d2fbd03a1fb68dfaa90f63e2bec2dbb3dd", "content_id": "c7d86f0bca90183c1375bc2533cffaab3fa8b520", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1820, "license_type": "permissive", "max_line_length": 66, "num_lines": 69, "path": "/tests/unit/utils/TypeUtils.py", "repo_name": "adnrs96/runtime", "src_encoding": "UTF-8", "text": "from collections import namedtuple\n\nfrom storyruntime.Types import InternalCommand, InternalService, \\\n SafeInternalCommand, SafeStreamingService, StreamingService\nfrom storyruntime.utils.TypeUtils import TypeUtils\n\n\ndef test_isnamedtuple():\n namedtuple_obj = namedtuple(\n 'NamedTupleObj',\n ['key']\n )\n\n assert TypeUtils.isnamedtuple(namedtuple_obj(\n key='key'\n ))\n assert not TypeUtils.isnamedtuple(namedtuple_obj)\n assert not TypeUtils.isnamedtuple(('a', 'b', 'c'))\n assert not TypeUtils.isnamedtuple({})\n assert not TypeUtils.isnamedtuple(1)\n assert not TypeUtils.isnamedtuple('a')\n assert not TypeUtils.isnamedtuple(False)\n\n\ndef test_safe_type_none():\n assert TypeUtils.safe_type(None) is None\n\n\ndef test_safe_type_streaming_service():\n assert isinstance(\n TypeUtils.safe_type(\n StreamingService(\n name='name',\n command='command',\n container_name='container_name',\n hostname='hostname'\n )\n ),\n SafeStreamingService\n )\n\n\ndef test_safe_type_internal_service(magic):\n service = InternalService(commands={\n 'command': InternalCommand(\n arguments=[],\n output_type='output_type',\n handler=magic()\n )\n })\n safe_type = 
TypeUtils.safe_type(service)\n\n assert isinstance(safe_type, InternalService)\n assert 'command' in safe_type.commands\n assert isinstance(\n safe_type.commands['command'],\n SafeInternalCommand\n )\n\n\ndef test_safe_type_internal_command(magic):\n command = InternalCommand(\n arguments=[],\n output_type='output_type',\n handler=magic()\n )\n safe_type = TypeUtils.safe_type(command)\n\n assert isinstance(safe_type, SafeInternalCommand)\n" }, { "alpha_fraction": 0.7071428298950195, "alphanum_fraction": 0.7089285850524902, "avg_line_length": 31.941177368164062, "blob_id": "e12388e230d0f549d7b324e764c7a5ef3474f16d", "content_id": "87f29b5a0178628ec358af1cbc16715992ba3a3c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 560, "license_type": "permissive", "max_line_length": 70, "num_lines": 17, "path": "/tests/integration/processing/Stories.py", "repo_name": "adnrs96/runtime", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom pytest import mark\n\nfrom storyruntime.App import App\nfrom storyruntime.Config import Config\nfrom storyruntime.Containers import Containers\nfrom storyruntime.processing import Stories\n\n\n# @mark.asyncio\n# async def test_story_run(patch, logger, story, app):\n# app.config = Config()\n# story.app = app\n# story.app.app_id = 'app_id'\n# patch.object(Stories, 'story', return_value=story)\n# patch.object(Containers, 'format_command', return_value=['pwd'])\n# await Stories.run(app, logger, story_name='hello.story')\n" }, { "alpha_fraction": 0.7573010921478271, "alphanum_fraction": 0.7724068760871887, "avg_line_length": 28.205883026123047, "blob_id": "7d2278daee9d2319e69336f4302965399c2b55bb", "content_id": "1bcd472f08599d6009b259e2a15e82bc71ec5132", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 993, "license_type": "permissive", "max_line_length": 204, "num_lines": 34, "path": "/README.md", "repo_name": "adnrs96/runtime", "src_encoding": "UTF-8", "text": "[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fasyncy%2Fplatform-engine.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fasyncy%2Fplatform-engine?ref=badge_shield)\n\n# Storyscript Cloud Runtime\nThe Storyscript runtime powering the Storyscript Cloud and executing stories.\n\n\n## Installing\nSee https://github.com/storyscript/stack-compose to install in production.\n\n```\n$ python setup.py install\n```\n\n## Testing\n\n1. Compile assets required for the engine\n2. Set the ASSET_DIR environment variable to this dir\n3. Start the engine\n\n```\n$ asyncy-server start\n```\n\n## Configuration options\nThe engine loads its configuration options from the environment. 
Defaults are\nprovided:\n\n```\n$ export logger_name=asyncy\n$ export logger_level=debug\n```\n\n## License\n[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fasyncy%2Fplatform-engine.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fasyncy%2Fplatform-engine?ref=badge_large)\n" }, { "alpha_fraction": 0.7193675637245178, "alphanum_fraction": 0.7351778745651245, "avg_line_length": 28.22222137451172, "blob_id": "135d9f8665ae4ab2181f5059c55641a15c457c00", "content_id": "917b27fc2c1b3d186534e807be3b59ae08f132f1", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 253, "license_type": "permissive", "max_line_length": 73, "num_lines": 9, "path": "/entrypoint.sh", "repo_name": "adnrs96/runtime", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n# The engine doesn't support connecting to Docker via unix socket.\n# Bind it to a port instead.\nif [ -a /var/run/docker.sock ]; then\n    socat TCP-LISTEN:2375,fork UNIX-CONNECT:/var/run/docker.sock & disown\nfi\n\nexec storyscript-server start" }, { "alpha_fraction": 0.5416666865348816, "alphanum_fraction": 0.5425823926925659, "avg_line_length": 26.64556884765625, "blob_id": "caae81d4b1e33c47646e8c96dbb92cab0641c7f6", "content_id": "0fac1799710a7e1de02ff79206243c857d5d5b3b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2184, "license_type": "permissive", "max_line_length": 69, "num_lines": 79, "path": "/storyruntime/utils/TypeUtils.py", "repo_name": "adnrs96/runtime", "src_encoding": "UTF-8", "text": "import re\n\nfrom requests.structures import CaseInsensitiveDict\n\nfrom ..Exceptions import StoryscriptRuntimeError\nfrom ..Types import InternalCommand, \\\n    InternalService, SafeInternalCommand, \\\n    SafeStreamingService, StreamingService\nfrom ..entities.Multipart import \\\n    FileFormField, FormField\n\n\nclass TypeUtils:\n\n    RE_PATTERN = type(re.compile('a'))\n\n    allowed_types = [\n        FileFormField,\n        FormField,\n        RE_PATTERN,\n        str,\n        int,\n        float,\n        bool,\n        list,\n        dict,\n        bytes\n    ]\n\n    @staticmethod\n    def isnamedtuple(o):\n        t = type(o)\n        b = t.__bases__\n        if len(b) != 1 or b[0] != tuple:\n            return False\n        f = getattr(t, '_fields', None)\n        if not isinstance(f, tuple):\n            return False\n        return all(type(n) == str for n in f)\n\n    @staticmethod\n    def safe_type(o):\n        \"\"\"\n        This will safely convert the object to a safe type that won't\n        expose sensitive information or internal data.\n\n        :param o: the object you wish to convert\n        :return: returns a converted type\n        \"\"\"\n        if o is None:\n            return None\n        elif isinstance(o, CaseInsensitiveDict):\n            return dict(o.items())\n        elif isinstance(o, StreamingService):\n            return SafeStreamingService(\n                name=o.name,\n                command=o.command\n            )\n        elif isinstance(o, InternalService):\n            service = InternalService(commands={})\n            for key, value in o.commands.items():\n                service.commands[key] = TypeUtils.safe_type(\n                    value\n                )\n            return service\n        elif isinstance(o, InternalCommand):\n            return SafeInternalCommand(\n                arguments=o.arguments,\n                output_type=o.output_type\n            )\n        else:\n            # ensure the type is a primitive type\n            if not type(o) in TypeUtils.allowed_types:\n                raise StoryscriptRuntimeError(\n                    message=f'Incompatible type: '\n                            f'{type(o)}'\n                )\n\n            return o\n" }, { "alpha_fraction": 0.7129186391830444, "alphanum_fraction": 0.7129186391830444, "avg_line_length": 22.22222137451172, "blob_id": "4ea0298049f9fe97fd5210a18b1601b15d713d29", "content_id": 
"b417e16bf4fa9d35fcbdcbb37b7a1434a44f2862", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 418, "license_type": "permissive", "max_line_length": 64, "num_lines": 18, "path": "/tests/unit/reporting/ReportingAgent.py", "repo_name": "adnrs96/runtime", "src_encoding": "UTF-8", "text": "from pytest import mark\n\nfrom storyruntime.reporting.ReportingAgent import ReportingAgent\n\n\n@mark.asyncio\nasync def test_ensure_interface(magic):\n impl = magic()\n\n class ReportingAgentSample(ReportingAgent):\n async def capture(self, re):\n impl.received(re)\n\n sample_agent = ReportingAgentSample()\n re = magic()\n await sample_agent.capture(re)\n\n impl.received.assert_called_with(re)\n" }, { "alpha_fraction": 0.5646387934684753, "alphanum_fraction": 0.5703421831130981, "avg_line_length": 25.350000381469727, "blob_id": "db48a52759711c2d2010316919f49770743cb7d1", "content_id": "029616096864714231946f2cb576f4e78407a3cc", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 526, "license_type": "permissive", "max_line_length": 67, "num_lines": 20, "path": "/Dockerfile", "repo_name": "adnrs96/runtime", "src_encoding": "UTF-8", "text": "FROM python:3.7.4\n\nRUN apt-get update\nRUN apt-get install -y socat\n\n# Optimization to not keep downloading dependencies on every build.\nRUN mkdir /app\nCOPY ./README.md /app\nCOPY ./setup.py /app\nWORKDIR /app\nRUN python setup.py install\n\nCOPY . /app/\nWORKDIR /app\nRUN chmod +x entrypoint.sh\nRUN python setup.py install\nENV ASSET_DIR /asyncy\nENV logger_level info\n\nENTRYPOINT [\"/app/entrypoint.sh\"]" }, { "alpha_fraction": 0.6823529601097107, "alphanum_fraction": 0.6878431439399719, "avg_line_length": 28.65116310119629, "blob_id": "0a5feaa8051ea986f3aa1f6773d838512b74269a", "content_id": "20be443f0c27f430d941e593a2345c8b2d3596dd", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1275, "license_type": "permissive", "max_line_length": 76, "num_lines": 43, "path": "/tests/integration/GraphQLAPI.py", "repo_name": "adnrs96/runtime", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport pytest\nfrom pytest import mark\n\nfrom storyruntime.Exceptions import ServiceNotFound\nfrom storyruntime.GraphQLAPI import GraphQLAPI\n\n\n@mark.asyncio\nasync def test_get_by_alias(logger):\n ret = await GraphQLAPI.get_by_alias(logger, 'http', 'latest')\n assert 'asyncy/http' in ret[1]\n assert ret[2]['omg'] is not None\n assert ret[2]['actions'] is not None\n\n\n@mark.asyncio\nasync def test_get_by_alias_invalid(logger):\n with pytest.raises(ServiceNotFound):\n await GraphQLAPI.get_by_alias(\n logger, 'this_alias_better_not_exist___', 'latest')\n\n\n@mark.asyncio\nasync def test_get_by_slug(logger):\n ret = await GraphQLAPI.get_by_slug(logger, 'storyscript/http', 'latest')\n assert 'asyncy/http' in ret[1]\n assert ret[2]['omg'] is not None\n assert ret[2]['actions'] is not None\n\n\n@mark.asyncio\nasync def test_get_by_slug_invalid_owner(logger):\n with pytest.raises(ServiceNotFound):\n await GraphQLAPI.get_by_slug(\n logger, 'this_owner_better_not_exist___/http', 'latest')\n\n\n@mark.asyncio\nasync def test_get_by_slug_invalid_service(logger):\n with pytest.raises(ServiceNotFound):\n await GraphQLAPI.get_by_slug(\n logger, 'asyncy/this_service_better_not_exist___', 'latest')\n" }, { "alpha_fraction": 0.699999988079071, "alphanum_fraction": 
0.7009708881378174, "avg_line_length": 24.121952056884766, "blob_id": "df3d5c7c84e988b16ec9fb82ec75401164e4f08c", "content_id": "b57e068746c37a84f4348973ad8651834c5b71f6", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1030, "license_type": "permissive", "max_line_length": 77, "num_lines": 41, "path": "/tests/unit/processing/Mutations.py", "repo_name": "adnrs96/runtime", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport pytest\n\nfrom storyruntime.Exceptions import StoryscriptError\nfrom storyruntime.processing.Mutations import Mutations\nfrom storyruntime.processing.mutations.StringMutations import StringMutations\n\n\n# Note: All mutations are tested via integration\n# in Lexicon.py under integration tests.\n\n\ndef test_mutations_unexpected_type(story):\n mutation = {\n 'mutation': 'foo'\n }\n\n with pytest.raises(StoryscriptError):\n Mutations.mutate(mutation, Mutations, story, None)\n\n\ndef test_mutations_unexpected_mutation(story):\n mutation = {\n 'mutation': 'foo'\n }\n\n with pytest.raises(StoryscriptError):\n Mutations.mutate(mutation, 'string', story, None)\n\n\ndef test_mutations_handler_exception(story, patch):\n def exc(*args):\n raise Exception()\n\n patch.object(StringMutations, 'replace', side_effect=exc)\n mutation = {\n 'mutation': 'replace'\n }\n\n with pytest.raises(StoryscriptError):\n Mutations.mutate(mutation, 'string', story, None)\n" }, { "alpha_fraction": 0.5976783037185669, "alphanum_fraction": 0.6087064743041992, "avg_line_length": 32.314918518066406, "blob_id": "5e20ab4a60c339d9a74b411daf52207a08aebd6c", "content_id": "35de3a0ebe0cd7581e2a753aede02afdc20ebdd4", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12060, "license_type": "permissive", "max_line_length": 79, "num_lines": 362, "path": "/tests/unit/Containers.py", "repo_name": "adnrs96/runtime", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport hashlib\nfrom unittest.mock import MagicMock\n\nimport pytest\nfrom pytest import fixture, mark\n\nfrom storyruntime.AppConfig import Forward\nfrom storyruntime.Containers import Containers\nfrom storyruntime.Exceptions import ActionNotFound, \\\n ContainerSpecNotRegisteredError, \\\n EnvironmentVariableNotFound, K8sError\nfrom storyruntime.Kubernetes import Kubernetes\nfrom storyruntime.constants.LineConstants import LineConstants\nfrom storyruntime.constants.ServiceConstants import ServiceConstants\nfrom storyruntime.db.Database import Database\nfrom storyruntime.entities.ContainerConfig import ContainerConfig\nfrom storyruntime.entities.Volume import Volume\nfrom storyruntime.processing import Stories\n\n\n@fixture\ndef line():\n return MagicMock()\n\n\ndef test_is_service_reusable(story):\n story.app.services = {\n 'alpine': {\n 'configuration': {\n 'actions': {\n 'echo': {\n 'run': 'foo'\n }\n }\n }\n }\n }\n\n line = {\n LineConstants.service: 'alpine',\n LineConstants.command: 'echo'\n }\n\n assert Containers.is_service_reusable(story.app, line) is False\n story.app.services['alpine']['configuration']['actions']['echo'][\n 'run'] = None\n\n assert Containers.is_service_reusable(story.app, line) is True\n\n\n@mark.parametrize('reusable', [False, True])\n@mark.parametrize('name', ['alpine', 'a!lpine', 'ALPINE', '__aLpInE'])\ndef test_get_container_name(patch, story, line, reusable, name):\n patch.object(Containers, 'is_service_reusable', return_value=reusable)\n story.app.app_id = 
'my_app'\n story.app.version = 'v2'\n ret = Containers.get_container_name(story.app, story.name, line, name)\n if reusable:\n assert ret == f'alpine-{Containers.hash_service_name(story.app, name)}'\n else:\n h = Containers.hash_service_name_and_story_line(story.app, story.name,\n line, name)\n assert ret == f'alpine-{h}'\n\n\ndef test_get_containerconfig_name(app):\n app.version = 'v1'\n config = ContainerConfig(name='name_with_special_!!!_characters', data={\n 'auths': {\n 'registry_url': {\n 'auth': 'base64_string'\n }\n }\n })\n r = Containers.get_containerconfig_name(app, config.name)\n assert r == 'namewithspecialchara-95b9733c79792f385564973c20be433f6f6832e9'\n\n\n@mark.asyncio\nasync def test_exec():\n with pytest.raises(K8sError):\n await Containers.exec(None, None, None, None, None)\n\n\n@mark.asyncio\nasync def test_container_get_hostname(patch, story, line):\n story.app.app_id = 'my_app'\n patch.object(Containers, 'get_container_name', return_value='foo')\n ret = await Containers.get_hostname(story, line, 'foo')\n assert ret == 'foo.my_app.svc.cluster.local'\n\n\n@mark.parametrize('image', [\n 'postgres',\n 'library/postgres',\n 'docker.io/postgres',\n 'docker.io/library/postgres',\n 'index.docker.io/postgres',\n])\ndef test_get_registry_url_official(image):\n ret = Containers.get_registry_url(image)\n assert ret == 'https://index.docker.io/v1/'\n\n\ndef test_get_registry_url_custom():\n image = 'cloud.canister.io:5000/repository/image'\n ret = Containers.get_registry_url(image)\n assert ret == 'cloud.canister.io:5000'\n\n\n@mark.asyncio\nasync def test_clean_app(patch, async_mock):\n patch.object(Kubernetes, 'clean_namespace', new=async_mock())\n app = MagicMock()\n await Containers.clean_app(app)\n Kubernetes.clean_namespace.mock.assert_called_with(app)\n\n\n@mark.asyncio\nasync def test_remove_volume(patch, story, line, async_mock):\n patch.object(Kubernetes, 'remove_volume', new=async_mock())\n await Containers.remove_volume(story.app, 'foo')\n Kubernetes.remove_volume.mock.assert_called_with(story.app, 'foo')\n\n\n@mark.asyncio\nasync def test_prepare_for_deployment(patch, async_mock):\n patch.object(Kubernetes, 'clean_namespace', new=async_mock())\n story = MagicMock()\n await Containers.prepare_for_deployment(story)\n Kubernetes.clean_namespace.mock.assert_called_with(story.app)\n\n\ndef test_format_command(logger, app, echo_service, echo_line):\n story = Stories.story(app, logger, 'echo.story')\n app.services = echo_service\n\n cmd = Containers.format_command(story, echo_line, 'alpine', 'echo')\n assert ['echo', '{\"msg\":\"foo\"}'] == cmd\n\n\n@mark.parametrize('reusable', [True, False])\ndef test_hash_volume_name(patch, story, line, reusable):\n line['ln'] = '1'\n patch.object(Containers, 'is_service_reusable', return_value=reusable)\n name = 'my_volume'\n service = 'foo'\n key = name + '-' + service\n if not reusable:\n key = f'{key}-{line[\"ln\"]}'\n\n expected = f'myvolume-' + hashlib.sha1(key.encode('utf-8')).hexdigest()\n assert Containers.hash_volume_name(story.app, line, service, name) == \\\n expected\n\n\ndef test_hash_ingress_name():\n e = Forward(service='service',\n service_forward_name='expose_name',\n http_path='expose_path')\n ret = Containers.hash_ingress_name(e)\n assert ret == 'exposename-0cf994f170f9d213bb814f74baca87ea149f7536'\n\n\n@mark.asyncio\nasync def test_expose_service(app, patch, async_mock):\n container_name = 'container_name'\n patch.object(Containers, 'get_container_name',\n return_value=container_name)\n\n patch.object(Containers, 
'create_and_start', new=async_mock())\n patch.object(Kubernetes, 'create_ingress', new=async_mock())\n\n e = Forward(service='service',\n service_forward_name='expose_name',\n http_path='expose_path')\n\n ingress_name = Containers.hash_ingress_name(e)\n hostname = f'{app.app_dns}--{Containers.get_simple_name(e.service)}'\n\n await Containers.expose_service(app, e)\n\n Containers.create_and_start.mock.assert_called_with(app, None, e.service,\n container_name)\n\n Kubernetes.create_ingress.mock.assert_called_with(ingress_name, app, e,\n container_name,\n hostname=hostname)\n\n\ndef test_service_name_and_story_line(patch, story):\n patch.object(hashlib, 'sha1')\n story.name = 'story_name'\n story.app.version = 'v29'\n ret = Containers.hash_service_name_and_story_line(\n story.app, story.name, {'ln': '1'}, 'alpine')\n\n hashlib.sha1.assert_called_with(f'alpine-v29-{story.name}-1'\n .encode('utf-8'))\n assert ret == hashlib.sha1().hexdigest()\n\n\ndef test_service_name(patch, story):\n story.app.version = 'v2'\n patch.object(hashlib, 'sha1')\n ret = Containers.hash_service_name(story.app, 'alpine')\n\n hashlib.sha1.assert_called_with(f'alpine-v2'.encode('utf-8'))\n assert ret == hashlib.sha1().hexdigest()\n\n\n@mark.asyncio\nasync def test_create_and_start_no_action(story):\n story.app.services = {'alpine': {'configuration': {'uuid': 'uuid'}}}\n with pytest.raises(ActionNotFound):\n await Containers.create_and_start(story.app, {'command': 'foo'},\n 'alpine', 'alpine')\n\n\n@mark.parametrize('run_command', [None, ['/bin/bash', 'sleep', '10000']])\n@mark.parametrize('with_volumes', [True, False])\n@mark.parametrize('missing_required_var', [False, True])\n@mark.asyncio\nasync def test_start(patch, story, async_mock,\n missing_required_var,\n run_command, with_volumes):\n line = {\n LineConstants.service: 'alpine',\n LineConstants.command: 'echo',\n 'ln': '1'\n }\n\n patch.object(Kubernetes, 'create_pod', new=async_mock())\n\n story.app.services = {\n 'alpine': {\n ServiceConstants.config: {\n 'uuid': '0c6299fe-7d38-4fde-a1cf-7b6ce610cb2d',\n 'actions': {\n 'echo': {\n }\n },\n 'volumes': {\n 'db': {\n 'persist': True,\n 'target': '/db'\n },\n 'tmp': {\n 'persist': False,\n 'target': '/tmp'\n }\n },\n 'environment': {\n 'param_1': {\n 'required': True\n },\n 'alpine_only': {}\n }\n }\n }\n }\n\n if not with_volumes:\n del story.app.services['alpine'][ServiceConstants.config]['volumes']\n\n if run_command is not None:\n story.app.services['alpine'][ServiceConstants.config]['actions'][\n 'echo'] = {'run': {'command': run_command}}\n\n story.app.environment = {\n 'alpine': {\n 'alpine_only': True,\n 'param_1': 'hello_world'\n },\n 'global': 'yes'\n }\n\n if missing_required_var:\n story.app.environment['alpine']['param_1'] = None\n\n patch.object(Containers, 'get_container_name',\n return_value='asyncy-alpine')\n\n patch.object(Database, 'get_container_configs',\n new=async_mock(return_value=[]))\n\n expected_volumes = []\n if with_volumes:\n hash_db = Containers.hash_volume_name(story.app, line, 'alpine', 'db')\n hash_tmp = Containers.hash_volume_name(story.app, line, 'alpine',\n 'tmp')\n expected_volumes = [\n Volume(persist=True, name=hash_db, mount_path='/db'),\n Volume(persist=False, name=hash_tmp, mount_path='/tmp'),\n ]\n\n if missing_required_var:\n with pytest.raises(EnvironmentVariableNotFound):\n await Containers.start(story, line)\n return\n else:\n await Containers.start(story, line)\n\n Kubernetes.create_pod.mock.assert_called_with(\n app=story.app, service_name='alpine',\n 
service_uuid='0c6299fe-7d38-4fde-a1cf-7b6ce610cb2d',\n image='alpine', container_name='asyncy-alpine',\n start_command=run_command or ['tail', '-f', '/dev/null'],\n shutdown_command=None,\n env={'alpine_only': True, 'param_1': 'hello_world'},\n volumes=expected_volumes,\n container_configs=[])\n\n\n@mark.asyncio\nasync def test_init(story, patch, async_mock):\n patch.object(Kubernetes, 'create_namespace', new=async_mock())\n await Containers.init(story.app)\n Kubernetes.create_namespace.mock.assert_called_with(story.app)\n\n\ndef test_format_command_no_format(logger, app, echo_service, echo_line):\n story = Stories.story(app, logger, 'echo.story')\n app.services = echo_service\n\n config = app.services['alpine'][ServiceConstants.config]\n config['actions']['echo']['format'] = None\n\n cmd = Containers.format_command(story, echo_line, 'alpine', 'echo')\n assert ['echo', '{\"msg\":\"foo\"}'] == cmd\n\n\ndef test_format_command_no_spec(logger, app, echo_line):\n story = Stories.story(app, logger, 'echo.story')\n app.services = {}\n with pytest.raises(ContainerSpecNotRegisteredError):\n Containers.format_command(story, echo_line, 'alpine', 'echo')\n\n\ndef test_format_command_no_args(logger, app, echo_service, echo_line):\n story = Stories.story(app, logger, 'echo.story')\n app.services = echo_service\n\n echo_service['alpine'][ServiceConstants.config]['actions']['echo'][\n 'arguments'] = None\n\n cmd = Containers.format_command(story, echo_line, 'alpine', 'echo')\n assert ['echo'] == cmd\n\n\ndef test_format_command_with_format(patch, logger, app,\n echo_service, echo_line):\n story = Stories.story(app, logger, 'echo.story')\n patch.object(story, 'argument_by_name', return_value='asyncy')\n app.services = echo_service\n\n config = app.services['alpine'][ServiceConstants.config]\n config['actions']['echo']['format'] = 'echo {msg}'\n\n cmd = Containers.format_command(story, echo_line, 'alpine', 'echo')\n assert ['echo', 'asyncy'] == cmd\n" }, { "alpha_fraction": 0.5615763664245605, "alphanum_fraction": 0.5632184147834778, "avg_line_length": 22.423076629638672, "blob_id": "1480bce5eeba2819f578e2b1fd67ca1b602d91ea", "content_id": "52f395a1acce6d055e176a12bd9c60035c5ab717", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 609, "license_type": "permissive", "max_line_length": 78, "num_lines": 26, "path": "/storyruntime/DeploymentLock.py", "repo_name": "adnrs96/runtime", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport asyncio\n\n\nclass DeploymentLock:\n\n lock = asyncio.Lock()\n apps = {}\n\n async def try_acquire(self, app_id):\n \"\"\"\n Non blocking acquire. If a deployment can continue, this\n method will return True. If a deployment for an app is already locked,\n then this will return False.\n \"\"\"\n async with self.lock:\n if self.apps.get(app_id):\n return False\n\n self.apps[app_id] = True\n\n return True\n\n async def release(self, app_id):\n async with self.lock:\n self.apps.pop(app_id)\n" } ]
13
KBSK/Opertaors_in_python
https://github.com/KBSK/Opertaors_in_python
3769c3de301ac7e00177cbbb2f77c4c996d81e59
a8d70745e575a817146ab976a3be6a0aa0437083
0865ed7562bd258ca8c960320e0f1858bc185584
refs/heads/master
2020-12-09T10:40:53.647775
2020-01-11T18:51:36
2020-01-11T18:51:36
233,280,567
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.625, "alphanum_fraction": 0.625, "avg_line_length": 13.222222328186035, "blob_id": "2c4431e8c8817400424dd0d0f8885744b3a757c5", "content_id": "e6a7a5fb7e716eebd034fc3dc7cbc84a7935c039", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 128, "license_type": "no_license", "max_line_length": 29, "num_lines": 9, "path": "/Basic Python Math Operator.py", "repo_name": "KBSK/Opertaors_in_python", "src_encoding": "UTF-8", "text": "a = int(input(\"enter value\"))\nb = int(input(\"enter value\"))\n\nprint(a//b)\nprint(a+b)\nprint(a-b)\nprint(a*b)\nprint(a/b)\nprint(a%b)\n" } ]
1